From 43320c392b7addb209b4cfbfbaec19d87237ca46 Mon Sep 17 00:00:00 2001 From: Jeongyeon Nam Date: Mon, 21 Nov 2022 07:57:19 +0000 Subject: [PATCH] add deformable detr repo --- Deformable-DETR/LICENSE | 220 +++ Deformable-DETR/README.md | 169 +++ Deformable-DETR/benchmark.py | 67 + .../configs/r50_deformable_detr.sh | 10 + ...ble_detr_plus_iterative_bbox_refinement.sh | 11 + ...ive_bbox_refinement_plus_plus_two_stage.sh | 12 + .../r50_deformable_detr_single_scale.sh | 11 + .../r50_deformable_detr_single_scale_dc5.sh | 12 + Deformable-DETR/datasets/__init__.py | 33 + Deformable-DETR/datasets/coco.py | 169 +++ Deformable-DETR/datasets/coco_eval.py | 265 ++++ Deformable-DETR/datasets/coco_panoptic.py | 107 ++ Deformable-DETR/datasets/data_prefetcher.py | 70 + Deformable-DETR/datasets/panoptic_eval.py | 52 + Deformable-DETR/datasets/samplers.py | 139 ++ .../datasets/torchvision_datasets/__init__.py | 7 + .../datasets/torchvision_datasets/coco.py | 84 ++ Deformable-DETR/datasets/transforms.py | 284 ++++ Deformable-DETR/docs/changelog.md | 3 + Deformable-DETR/engine.py | 166 +++ Deformable-DETR/figs/convergence.png | Bin 0 -> 54636 bytes Deformable-DETR/figs/illustration.png | Bin 0 -> 272167 bytes Deformable-DETR/main.py | 326 ++++ Deformable-DETR/models/__init__.py | 15 + Deformable-DETR/models/backbone.py | 138 ++ Deformable-DETR/models/deformable_detr.py | 492 ++++++ .../models/deformable_transformer.py | 394 +++++ Deformable-DETR/models/matcher.py | 102 ++ .../models/ops/functions/__init__.py | 10 + .../ops/functions/ms_deform_attn_func.py | 61 + Deformable-DETR/models/ops/make.sh | 10 + .../models/ops/modules/__init__.py | 9 + .../models/ops/modules/ms_deform_attn.py | 115 ++ Deformable-DETR/models/ops/setup.py | 71 + .../models/ops/src/cpu/ms_deform_attn_cpu.cpp | 41 + .../models/ops/src/cpu/ms_deform_attn_cpu.h | 33 + .../ops/src/cuda/ms_deform_attn_cuda.cu | 153 ++ .../models/ops/src/cuda/ms_deform_attn_cuda.h | 30 + .../ops/src/cuda/ms_deform_im2col_cuda.cuh | 
1327 +++++++++++++++++ .../models/ops/src/ms_deform_attn.h | 62 + Deformable-DETR/models/ops/src/vision.cpp | 16 + Deformable-DETR/models/ops/test.py | 89 ++ Deformable-DETR/models/position_encoding.py | 97 ++ Deformable-DETR/models/segmentation.py | 369 +++++ Deformable-DETR/requirements.txt | 4 + Deformable-DETR/tools/launch.py | 192 +++ Deformable-DETR/tools/run_dist_launch.sh | 29 + Deformable-DETR/tools/run_dist_slurm.sh | 33 + Deformable-DETR/util/__init__.py | 8 + Deformable-DETR/util/box_ops.py | 96 ++ Deformable-DETR/util/misc.py | 518 +++++++ Deformable-DETR/util/plot_utils.py | 111 ++ 52 files changed, 6842 insertions(+) create mode 100644 Deformable-DETR/LICENSE create mode 100644 Deformable-DETR/README.md create mode 100644 Deformable-DETR/benchmark.py create mode 100755 Deformable-DETR/configs/r50_deformable_detr.sh create mode 100755 Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement.sh create mode 100755 Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage.sh create mode 100755 Deformable-DETR/configs/r50_deformable_detr_single_scale.sh create mode 100755 Deformable-DETR/configs/r50_deformable_detr_single_scale_dc5.sh create mode 100644 Deformable-DETR/datasets/__init__.py create mode 100644 Deformable-DETR/datasets/coco.py create mode 100644 Deformable-DETR/datasets/coco_eval.py create mode 100644 Deformable-DETR/datasets/coco_panoptic.py create mode 100644 Deformable-DETR/datasets/data_prefetcher.py create mode 100644 Deformable-DETR/datasets/panoptic_eval.py create mode 100644 Deformable-DETR/datasets/samplers.py create mode 100644 Deformable-DETR/datasets/torchvision_datasets/__init__.py create mode 100644 Deformable-DETR/datasets/torchvision_datasets/coco.py create mode 100644 Deformable-DETR/datasets/transforms.py create mode 100644 Deformable-DETR/docs/changelog.md create mode 100644 Deformable-DETR/engine.py create mode 100644 Deformable-DETR/figs/convergence.png create mode 
100644 Deformable-DETR/figs/illustration.png create mode 100644 Deformable-DETR/main.py create mode 100644 Deformable-DETR/models/__init__.py create mode 100644 Deformable-DETR/models/backbone.py create mode 100644 Deformable-DETR/models/deformable_detr.py create mode 100644 Deformable-DETR/models/deformable_transformer.py create mode 100644 Deformable-DETR/models/matcher.py create mode 100644 Deformable-DETR/models/ops/functions/__init__.py create mode 100644 Deformable-DETR/models/ops/functions/ms_deform_attn_func.py create mode 100755 Deformable-DETR/models/ops/make.sh create mode 100644 Deformable-DETR/models/ops/modules/__init__.py create mode 100644 Deformable-DETR/models/ops/modules/ms_deform_attn.py create mode 100644 Deformable-DETR/models/ops/setup.py create mode 100644 Deformable-DETR/models/ops/src/cpu/ms_deform_attn_cpu.cpp create mode 100644 Deformable-DETR/models/ops/src/cpu/ms_deform_attn_cpu.h create mode 100644 Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.cu create mode 100644 Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.h create mode 100644 Deformable-DETR/models/ops/src/cuda/ms_deform_im2col_cuda.cuh create mode 100644 Deformable-DETR/models/ops/src/ms_deform_attn.h create mode 100644 Deformable-DETR/models/ops/src/vision.cpp create mode 100644 Deformable-DETR/models/ops/test.py create mode 100644 Deformable-DETR/models/position_encoding.py create mode 100644 Deformable-DETR/models/segmentation.py create mode 100644 Deformable-DETR/requirements.txt create mode 100644 Deformable-DETR/tools/launch.py create mode 100755 Deformable-DETR/tools/run_dist_launch.sh create mode 100755 Deformable-DETR/tools/run_dist_slurm.sh create mode 100644 Deformable-DETR/util/__init__.py create mode 100644 Deformable-DETR/util/box_ops.py create mode 100644 Deformable-DETR/util/misc.py create mode 100644 Deformable-DETR/util/plot_utils.py diff --git a/Deformable-DETR/LICENSE b/Deformable-DETR/LICENSE new file mode 100644 index 0000000..522e5bd 
--- /dev/null +++ b/Deformable-DETR/LICENSE @@ -0,0 +1,220 @@ +Copyright (c) 2020 SenseTime. All Rights Reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 SenseTime + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +DETR + +Copyright 2020 - present, Facebook, Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/Deformable-DETR/README.md b/Deformable-DETR/README.md new file mode 100644 index 0000000..c9db563 --- /dev/null +++ b/Deformable-DETR/README.md @@ -0,0 +1,169 @@ +# Deformable DETR + +By [Xizhou Zhu](https://scholar.google.com/citations?user=02RXI00AAAAJ), [Weijie Su](https://www.weijiesu.com/), [Lewei Lu](https://www.linkedin.com/in/lewei-lu-94015977/), [Bin Li](http://staff.ustc.edu.cn/~binli/), [Xiaogang Wang](http://www.ee.cuhk.edu.hk/~xgwang/), [Jifeng Dai](https://jifengdai.org/). + +This repository is an official implementation of the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159). + + +## Introduction + +**TL; DR.** Deformable DETR is an efficient and fast-converging end-to-end object detector. It mitigates the high complexity and slow convergence issues of DETR via a novel sampling-based efficient attention mechanism. + +![deformable_detr](./figs/illustration.png) + +![deformable_detr](./figs/convergence.png) + +**Abstract.** DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we proposed Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10× less training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach. + +## License + +This project is released under the [Apache 2.0 license](./LICENSE). + +## Changelog + +See [changelog.md](./docs/changelog.md) for detailed logs of major changes. 
+ + +## Citing Deformable DETR +If you find Deformable DETR useful in your research, please consider citing: +```bibtex +@article{zhu2020deformable, + title={Deformable DETR: Deformable Transformers for End-to-End Object Detection}, + author={Zhu, Xizhou and Su, Weijie and Lu, Lewei and Li, Bin and Wang, Xiaogang and Dai, Jifeng}, + journal={arXiv preprint arXiv:2010.04159}, + year={2020} +} +``` + +## Main Results + +| Method | Epochs | AP | APS | APM | APL | params
(M)
| FLOPs
(G)
| Total
Train
Time
(GPU
hours)
| Train
Speed
(GPU
hours
/epoch)
| Infer
Speed
(FPS)
| Batch
Infer
Speed
(FPS)
| URL | +| ----------------------------------- | :----: | :--: | :----: | :---: | :------------------------------: | :--------------------:| :----------------------------------------------------------: | :--: | :---: | :---: | ----- | ----- | +| Faster R-CNN + FPN | 109 | 42.0 | 26.6 | 45.4 | 53.4 | 42 | 180 | 380 | 3.5 | 25.6 | 28.0 | - | +| DETR | 500 | 42.0 | 20.5 | 45.8 | 61.1 | 41 | 86 | 2000 | 4.0 | 27.0 | 38.3 | - | +| DETR-DC5 | 500 | 43.3 | 22.5 | 47.3 | 61.1 | 41 |187|7000|14.0|11.4|12.4| - | +| DETR-DC5 | 50 | 35.3 | 15.2 | 37.5 | 53.6 | 41 |187|700|14.0|11.4|12.4| - | +| DETR-DC5+ | 50 | 36.2 | 16.3 | 39.2 | 53.9 | 41 |187|700|14.0|11.4|12.4| - | +| **Deformable DETR
(single scale)
** | 50 | 39.4 | 20.6 | 43.0 | 55.5 | 34 |78|160|3.2|27.0|42.4| [config](./configs/r50_deformable_detr_single_scale.sh)
[log](https://drive.google.com/file/d/1n3ZnZ-UAqmTUR4AZoM4qQntIDn6qCZx4/view?usp=sharing)
[model](https://drive.google.com/file/d/1WEjQ9_FgfI5sw5OZZ4ix-OKk-IJ_-SDU/view?usp=sharing)
| +| **Deformable DETR
(single scale, DC5)
** | 50 | 41.5 | 24.1 | 45.3 | 56.0 | 34 |128|215|4.3|22.1|29.4| [config](./configs/r50_deformable_detr_single_scale_dc5.sh)
[log](https://drive.google.com/file/d/1-UfTp2q4GIkJjsaMRIkQxa5k5vn8_n-B/view?usp=sharing)
[model](https://drive.google.com/file/d/1m_TgMjzH7D44fbA-c_jiBZ-xf-odxGdk/view?usp=sharing)
| +| **Deformable DETR** | 50 | 44.5 | 27.1 | 47.6 | 59.6 | 40 |173|325|6.5|15.0|19.4|[config](./configs/r50_deformable_detr.sh)
[log](https://drive.google.com/file/d/18YSLshFjc_erOLfFC-hHu4MX4iyz1Dqr/view?usp=sharing)
[model](https://drive.google.com/file/d/1nDWZWHuRwtwGden77NLM9JoWe-YisJnA/view?usp=sharing)
| +| **+ iterative bounding box refinement** | 50 | 46.2 | 28.3 | 49.2 | 61.5 | 41 |173|325|6.5|15.0|19.4|[config](./configs/r50_deformable_detr_plus_iterative_bbox_refinement.sh)
[log](https://drive.google.com/file/d/1DFNloITi1SFBWjYzvVEAI75ndwmGM1Uj/view?usp=sharing)
[model](https://drive.google.com/file/d/1JYKyRYzUH7uo9eVfDaVCiaIGZb5YTCuI/view?usp=sharing)
| +| **++ two-stage Deformable DETR** | 50 | 46.9 | 29.6 | 50.1 | 61.6 | 41 |173|340|6.8|14.5|18.8|[config](./configs/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage.sh)
[log](https://drive.google.com/file/d/1ozi0wbv5-Sc5TbWt1jAuXco72vEfEtbY/view?usp=sharing)
[model](https://drive.google.com/file/d/15I03A7hNTpwuLNdfuEmW9_taZMNVssEp/view?usp=sharing)
| + +*Note:* + +1. All models of Deformable DETR are trained with total batch size of 32. +2. Training and inference speed are measured on NVIDIA Tesla V100 GPU. +3. "Deformable DETR (single scale)" means only using res5 feature map (of stride 32) as input feature maps for Deformable Transformer Encoder. +4. "DC5" means removing the stride in C5 stage of ResNet and add a dilation of 2 instead. +5. "DETR-DC5+" indicates DETR-DC5 with some modifications, including using Focal Loss for bounding box classification and increasing number of object queries to 300. +6. "Batch Infer Speed" refer to inference with batch size = 4 to maximize GPU utilization. +7. The original implementation is based on our internal codebase. There are slight differences in the final accuracy and running time due to the plenty details in platform switch. + + +## Installation + +### Requirements + +* Linux, CUDA>=9.2, GCC>=5.4 + +* Python>=3.7 + + We recommend you to use Anaconda to create a conda environment: + ```bash + conda create -n deformable_detr python=3.7 pip + ``` + Then, activate the environment: + ```bash + conda activate deformable_detr + ``` + +* PyTorch>=1.5.1, torchvision>=0.6.1 (following instructions [here](https://pytorch.org/)) + + For example, if your CUDA version is 9.2, you could install pytorch and torchvision as following: + ```bash + conda install pytorch=1.5.1 torchvision=0.6.1 cudatoolkit=9.2 -c pytorch + ``` + +* Other requirements + ```bash + pip install -r requirements.txt + ``` + +### Compiling CUDA operators +```bash +cd ./models/ops +sh ./make.sh +# unit test (should see all checking is True) +python test.py +``` + +## Usage + +### Dataset preparation + +Please download [COCO 2017 dataset](https://cocodataset.org/) and organize them as following: + +``` +code_root/ +└── data/ + └── coco/ + ├── train2017/ + ├── val2017/ + └── annotations/ + ├── instances_train2017.json + └── instances_val2017.json +``` + +### Training + +#### Training on single node + +For 
example, the command for training Deformable DETR on 8 GPUs is as following: + +```bash +GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 8 ./configs/r50_deformable_detr.sh +``` + +#### Training on multiple nodes + +For example, the command for training Deformable DETR on 2 nodes of each with 8 GPUs is as following: + +On node 1: + +```bash +MASTER_ADDR= NODE_RANK=0 GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 16 ./configs/r50_deformable_detr.sh +``` + +On node 2: + +```bash +MASTER_ADDR= NODE_RANK=1 GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 16 ./configs/r50_deformable_detr.sh +``` + +#### Training on slurm cluster + +If you are using slurm cluster, you can simply run the following command to train on 1 node with 8 GPUs: + +```bash +GPUS_PER_NODE=8 ./tools/run_dist_slurm.sh deformable_detr 8 configs/r50_deformable_detr.sh +``` + +Or 2 nodes of each with 8 GPUs: + +```bash +GPUS_PER_NODE=8 ./tools/run_dist_slurm.sh deformable_detr 16 configs/r50_deformable_detr.sh +``` +#### Some tips to speed-up training +* If your file system is slow to read images, you may consider enabling '--cache_mode' option to load whole dataset into memory at the beginning of training. +* You may increase the batch size to maximize the GPU utilization, according to GPU memory of yours, e.g., set '--batch_size 3' or '--batch_size 4'. + +### Evaluation + +You can get the config file and pretrained model of Deformable DETR (the link is in "Main Results" session), then run following command to evaluate it on COCO 2017 validation set: + +```bash + --resume --eval +``` + +You can also run distributed evaluation by using ```./tools/run_dist_launch.sh``` or ```./tools/run_dist_slurm.sh```. diff --git a/Deformable-DETR/benchmark.py b/Deformable-DETR/benchmark.py new file mode 100644 index 0000000..5919477 --- /dev/null +++ b/Deformable-DETR/benchmark.py @@ -0,0 +1,67 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. 
All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ + +""" +Benchmark inference speed of Deformable DETR. +""" +import os +import time +import argparse + +import torch + +from main import get_args_parser as get_main_args_parser +from models import build_model +from datasets import build_dataset +from util.misc import nested_tensor_from_tensor_list + + +def get_benckmark_arg_parser(): + parser = argparse.ArgumentParser('Benchmark inference speed of Deformable DETR.') + parser.add_argument('--num_iters', type=int, default=300, help='total iters to benchmark speed') + parser.add_argument('--warm_iters', type=int, default=5, help='ignore first several iters that are very slow') + parser.add_argument('--batch_size', type=int, default=1, help='batch size in inference') + parser.add_argument('--resume', type=str, help='load the pre-trained checkpoint') + return parser + + +@torch.no_grad() +def measure_average_inference_time(model, inputs, num_iters=100, warm_iters=5): + ts = [] + for iter_ in range(num_iters): + torch.cuda.synchronize() + t_ = time.perf_counter() + model(inputs) + torch.cuda.synchronize() + t = time.perf_counter() - t_ + if iter_ >= warm_iters: + ts.append(t) + print(ts) + return sum(ts) / len(ts) + + +def benchmark(): + args, _ = get_benckmark_arg_parser().parse_known_args() + main_args = get_main_args_parser().parse_args(_) + assert args.warm_iters < args.num_iters and args.num_iters > 0 and args.warm_iters >= 0 + assert args.batch_size > 0 + assert args.resume is None or os.path.exists(args.resume) + dataset = build_dataset('val', main_args) + model, _, _ = build_model(main_args) + model.cuda() + model.eval() + if args.resume is not None: + ckpt = torch.load(args.resume, map_location=lambda storage, loc: storage) + model.load_state_dict(ckpt['model']) + inputs = nested_tensor_from_tensor_list([dataset.__getitem__(0)[0].cuda() for _ 
in range(args.batch_size)]) + t = measure_average_inference_time(model, inputs, args.num_iters, args.warm_iters) + return 1.0 / t * args.batch_size + + +if __name__ == '__main__': + fps = benchmark() + print(f'Inference Speed: {fps:.1f} FPS') + diff --git a/Deformable-DETR/configs/r50_deformable_detr.sh b/Deformable-DETR/configs/r50_deformable_detr.sh new file mode 100755 index 0000000..a42953f --- /dev/null +++ b/Deformable-DETR/configs/r50_deformable_detr.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -x + +EXP_DIR=exps/r50_deformable_detr +PY_ARGS=${@:1} + +python -u main.py \ + --output_dir ${EXP_DIR} \ + ${PY_ARGS} diff --git a/Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement.sh b/Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement.sh new file mode 100755 index 0000000..8ea2000 --- /dev/null +++ b/Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -x + +EXP_DIR=exps/r50_deformable_detr_plus_iterative_bbox_refinement +PY_ARGS=${@:1} + +python -u main.py \ + --output_dir ${EXP_DIR} \ + --with_box_refine \ + ${PY_ARGS} diff --git a/Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage.sh b/Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage.sh new file mode 100755 index 0000000..722c658 --- /dev/null +++ b/Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -x + +EXP_DIR=exps/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage +PY_ARGS=${@:1} + +python -u main.py \ + --output_dir ${EXP_DIR} \ + --with_box_refine \ + --two_stage \ + ${PY_ARGS} diff --git a/Deformable-DETR/configs/r50_deformable_detr_single_scale.sh b/Deformable-DETR/configs/r50_deformable_detr_single_scale.sh new file mode 100755 index 0000000..a24e547 --- /dev/null +++ 
b/Deformable-DETR/configs/r50_deformable_detr_single_scale.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -x + +EXP_DIR=exps/r50_deformable_detr_single_scale +PY_ARGS=${@:1} + +python -u main.py \ + --num_feature_levels 1 \ + --output_dir ${EXP_DIR} \ + ${PY_ARGS} diff --git a/Deformable-DETR/configs/r50_deformable_detr_single_scale_dc5.sh b/Deformable-DETR/configs/r50_deformable_detr_single_scale_dc5.sh new file mode 100755 index 0000000..26d35d6 --- /dev/null +++ b/Deformable-DETR/configs/r50_deformable_detr_single_scale_dc5.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -x + +EXP_DIR=exps/r50_deformable_detr_single_scale_dc5 +PY_ARGS=${@:1} + +python -u main.py \ + --num_feature_levels 1 \ + --dilation \ + --output_dir ${EXP_DIR} \ + ${PY_ARGS} diff --git a/Deformable-DETR/datasets/__init__.py b/Deformable-DETR/datasets/__init__.py new file mode 100644 index 0000000..f5bd856 --- /dev/null +++ b/Deformable-DETR/datasets/__init__.py @@ -0,0 +1,33 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +# ------------------------------------------------------------------------ + +import torch.utils.data +from .torchvision_datasets import CocoDetection + +from .coco import build as build_coco + + +def get_coco_api_from_dataset(dataset): + for _ in range(10): + # if isinstance(dataset, torchvision.datasets.CocoDetection): + # break + if isinstance(dataset, torch.utils.data.Subset): + dataset = dataset.dataset + if isinstance(dataset, CocoDetection): + return dataset.coco + + +def build_dataset(image_set, args): + if args.dataset_file == 'coco': + return build_coco(image_set, args) + if args.dataset_file == 'coco_panoptic': + # to avoid making panopticapi required for coco + from .coco_panoptic import build as build_coco_panoptic + return build_coco_panoptic(image_set, args) + raise ValueError(f'dataset {args.dataset_file} not supported') diff --git a/Deformable-DETR/datasets/coco.py b/Deformable-DETR/datasets/coco.py new file mode 100644 index 0000000..1be8308 --- /dev/null +++ b/Deformable-DETR/datasets/coco.py @@ -0,0 +1,169 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# ------------------------------------------------------------------------ + +""" +COCO dataset which returns image_id for evaluation. 
+ +Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py +""" +from pathlib import Path + +import torch +import torch.utils.data +from pycocotools import mask as coco_mask + +from .torchvision_datasets import CocoDetection as TvCocoDetection +from util.misc import get_local_rank, get_local_size +import datasets.transforms as T + + +class CocoDetection(TvCocoDetection): + def __init__(self, img_folder, ann_file, transforms, return_masks, cache_mode=False, local_rank=0, local_size=1): + super(CocoDetection, self).__init__(img_folder, ann_file, + cache_mode=cache_mode, local_rank=local_rank, local_size=local_size) + self._transforms = transforms + self.prepare = ConvertCocoPolysToMask(return_masks) + + def __getitem__(self, idx): + img, target = super(CocoDetection, self).__getitem__(idx) + image_id = self.ids[idx] + target = {'image_id': image_id, 'annotations': target} + img, target = self.prepare(img, target) + if self._transforms is not None: + img, target = self._transforms(img, target) + return img, target + + +def convert_coco_poly_to_mask(segmentations, height, width): + masks = [] + for polygons in segmentations: + rles = coco_mask.frPyObjects(polygons, height, width) + mask = coco_mask.decode(rles) + if len(mask.shape) < 3: + mask = mask[..., None] + mask = torch.as_tensor(mask, dtype=torch.uint8) + mask = mask.any(dim=2) + masks.append(mask) + if masks: + masks = torch.stack(masks, dim=0) + else: + masks = torch.zeros((0, height, width), dtype=torch.uint8) + return masks + + +class ConvertCocoPolysToMask(object): + def __init__(self, return_masks=False): + self.return_masks = return_masks + + def __call__(self, image, target): + w, h = image.size + + image_id = target["image_id"] + image_id = torch.tensor([image_id]) + + anno = target["annotations"] + + anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0] + + boxes = [obj["bbox"] for obj in anno] + # guard against no boxes via 
resizing: reshape(-1, 4) below yields an empty (0, 4) tensor when there are no annotations
+ ), + normalize, + ]) + + if image_set == 'val': + return T.Compose([ + T.RandomResize([800], max_size=1333), + normalize, + ]) + + raise ValueError(f'unknown {image_set}') + + +def build(image_set, args): + root = Path(args.coco_path) + assert root.exists(), f'provided COCO path {root} does not exist' + mode = 'instances' + PATHS = { + "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'), + "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'), + } + + img_folder, ann_file = PATHS[image_set] + dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks, + cache_mode=args.cache_mode, local_rank=get_local_rank(), local_size=get_local_size()) + return dataset diff --git a/Deformable-DETR/datasets/coco_eval.py b/Deformable-DETR/datasets/coco_eval.py new file mode 100644 index 0000000..9a3ebe7 --- /dev/null +++ b/Deformable-DETR/datasets/coco_eval.py @@ -0,0 +1,265 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# ------------------------------------------------------------------------ + +""" +COCO evaluator that works in distributed mode. 
+ +Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py +The difference is that there is less copy-pasting from pycocotools +in the end of the file, as python3 can suppress prints with contextlib +""" +import os +import contextlib +import copy +import numpy as np +import torch + +from pycocotools.cocoeval import COCOeval +from pycocotools.coco import COCO +import pycocotools.mask as mask_util + +from util.misc import all_gather + + +class CocoEvaluator(object): + def __init__(self, coco_gt, iou_types): + assert isinstance(iou_types, (list, tuple)) + coco_gt = copy.deepcopy(coco_gt) + self.coco_gt = coco_gt + + self.iou_types = iou_types + self.coco_eval = {} + for iou_type in iou_types: + self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type) + + self.img_ids = [] + self.eval_imgs = {k: [] for k in iou_types} + + def update(self, predictions): + img_ids = list(np.unique(list(predictions.keys()))) + self.img_ids.extend(img_ids) + + for iou_type in self.iou_types: + results = self.prepare(predictions, iou_type) + + # suppress pycocotools prints + with open(os.devnull, 'w') as devnull: + with contextlib.redirect_stdout(devnull): + coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO() + coco_eval = self.coco_eval[iou_type] + + coco_eval.cocoDt = coco_dt + coco_eval.params.imgIds = list(img_ids) + img_ids, eval_imgs = evaluate(coco_eval) + + self.eval_imgs[iou_type].append(eval_imgs) + + def synchronize_between_processes(self): + for iou_type in self.iou_types: + self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2) + create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]) + + def accumulate(self): + for coco_eval in self.coco_eval.values(): + coco_eval.accumulate() + + def summarize(self): + for iou_type, coco_eval in self.coco_eval.items(): + print("IoU metric: {}".format(iou_type)) + coco_eval.summarize() + + def prepare(self, predictions, 
iou_type): + if iou_type == "bbox": + return self.prepare_for_coco_detection(predictions) + elif iou_type == "segm": + return self.prepare_for_coco_segmentation(predictions) + elif iou_type == "keypoints": + return self.prepare_for_coco_keypoint(predictions) + else: + raise ValueError("Unknown iou type {}".format(iou_type)) + + def prepare_for_coco_detection(self, predictions): + coco_results = [] + for original_id, prediction in predictions.items(): + if len(prediction) == 0: + continue + + boxes = prediction["boxes"] + boxes = convert_to_xywh(boxes).tolist() + scores = prediction["scores"].tolist() + labels = prediction["labels"].tolist() + + coco_results.extend( + [ + { + "image_id": original_id, + "category_id": labels[k], + "bbox": box, + "score": scores[k], + } + for k, box in enumerate(boxes) + ] + ) + return coco_results + + def prepare_for_coco_segmentation(self, predictions): + coco_results = [] + for original_id, prediction in predictions.items(): + if len(prediction) == 0: + continue + + scores = prediction["scores"] + labels = prediction["labels"] + masks = prediction["masks"] + + masks = masks > 0.5 + + scores = prediction["scores"].tolist() + labels = prediction["labels"].tolist() + + rles = [ + mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] + for mask in masks + ] + for rle in rles: + rle["counts"] = rle["counts"].decode("utf-8") + + coco_results.extend( + [ + { + "image_id": original_id, + "category_id": labels[k], + "segmentation": rle, + "score": scores[k], + } + for k, rle in enumerate(rles) + ] + ) + return coco_results + + def prepare_for_coco_keypoint(self, predictions): + coco_results = [] + for original_id, prediction in predictions.items(): + if len(prediction) == 0: + continue + + boxes = prediction["boxes"] + boxes = convert_to_xywh(boxes).tolist() + scores = prediction["scores"].tolist() + labels = prediction["labels"].tolist() + keypoints = prediction["keypoints"] + keypoints = 
keypoints.flatten(start_dim=1).tolist() + + coco_results.extend( + [ + { + "image_id": original_id, + "category_id": labels[k], + 'keypoints': keypoint, + "score": scores[k], + } + for k, keypoint in enumerate(keypoints) + ] + ) + return coco_results + + +def convert_to_xywh(boxes): + xmin, ymin, xmax, ymax = boxes.unbind(1) + return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1) + + +def merge(img_ids, eval_imgs): + all_img_ids = all_gather(img_ids) + all_eval_imgs = all_gather(eval_imgs) + + merged_img_ids = [] + for p in all_img_ids: + merged_img_ids.extend(p) + + merged_eval_imgs = [] + for p in all_eval_imgs: + merged_eval_imgs.append(p) + + merged_img_ids = np.array(merged_img_ids) + merged_eval_imgs = np.concatenate(merged_eval_imgs, 2) + + # keep only unique (and in sorted order) images + merged_img_ids, idx = np.unique(merged_img_ids, return_index=True) + merged_eval_imgs = merged_eval_imgs[..., idx] + + return merged_img_ids, merged_eval_imgs + + +def create_common_coco_eval(coco_eval, img_ids, eval_imgs): + img_ids, eval_imgs = merge(img_ids, eval_imgs) + img_ids = list(img_ids) + eval_imgs = list(eval_imgs.flatten()) + + coco_eval.evalImgs = eval_imgs + coco_eval.params.imgIds = img_ids + coco_eval._paramsEval = copy.deepcopy(coco_eval.params) + + +################################################################# +# From pycocotools, just removed the prints and fixed +# a Python3 bug about unicode not defined +################################################################# + + +def evaluate(self): + ''' + Run per image evaluation on given images and store results (a list of dict) in self.evalImgs + :return: None + ''' + # tic = time.time() + # print('Running per image evaluation...') + p = self.params + # add backward compatibility if useSegm is specified in params + if p.useSegm is not None: + p.iouType = 'segm' if p.useSegm == 1 else 'bbox' + print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType)) + # print('Evaluate annotation type *{}*'.format(p.iouType)) + p.imgIds = list(np.unique(p.imgIds)) + if p.useCats: + p.catIds = list(np.unique(p.catIds)) + p.maxDets = sorted(p.maxDets) + self.params = p + + self._prepare() + # loop through images, area range, max detection number + catIds = p.catIds if p.useCats else [-1] + + if p.iouType == 'segm' or p.iouType == 'bbox': + computeIoU = self.computeIoU + elif p.iouType == 'keypoints': + computeIoU = self.computeOks + self.ious = { + (imgId, catId): computeIoU(imgId, catId) + for imgId in p.imgIds + for catId in catIds} + + evaluateImg = self.evaluateImg + maxDet = p.maxDets[-1] + evalImgs = [ + evaluateImg(imgId, catId, areaRng, maxDet) + for catId in catIds + for areaRng in p.areaRng + for imgId in p.imgIds + ] + # this is NOT in the pycocotools code, but could be done outside + evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds)) + self._paramsEval = copy.deepcopy(self.params) + # toc = time.time() + # print('DONE (t={:0.2f}s).'.format(toc-tic)) + return p.imgIds, evalImgs + +################################################################# +# end of straight copy from pycocotools, just removing the prints +################################################################# diff --git a/Deformable-DETR/datasets/coco_panoptic.py b/Deformable-DETR/datasets/coco_panoptic.py new file mode 100644 index 0000000..e856e49 --- /dev/null +++ b/Deformable-DETR/datasets/coco_panoptic.py @@ -0,0 +1,107 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +# ------------------------------------------------------------------------ + +import json +from pathlib import Path + +import numpy as np +import torch +from PIL import Image + +from panopticapi.utils import rgb2id +from util.box_ops import masks_to_boxes + +from .coco import make_coco_transforms + + +class CocoPanoptic: + def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True): + with open(ann_file, 'r') as f: + self.coco = json.load(f) + + # sort 'images' field so that they are aligned with 'annotations' + # i.e., in alphabetical order + self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id']) + # sanity check + if "annotations" in self.coco: + for img, ann in zip(self.coco['images'], self.coco['annotations']): + assert img['file_name'][:-4] == ann['file_name'][:-4] + + self.img_folder = img_folder + self.ann_folder = ann_folder + self.ann_file = ann_file + self.transforms = transforms + self.return_masks = return_masks + + def __getitem__(self, idx): + ann_info = self.coco['annotations'][idx] if "annotations" in self.coco else self.coco['images'][idx] + img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg') + ann_path = Path(self.ann_folder) / ann_info['file_name'] + + img = Image.open(img_path).convert('RGB') + w, h = img.size + if "segments_info" in ann_info: + masks = np.asarray(Image.open(ann_path), dtype=np.uint32) + masks = rgb2id(masks) + + ids = np.array([ann['id'] for ann in ann_info['segments_info']]) + masks = masks == ids[:, None, None] + + masks = torch.as_tensor(masks, dtype=torch.uint8) + labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']], dtype=torch.int64) + + target = {} + target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]]) + if self.return_masks: + target['masks'] = masks + target['labels'] = labels + + target["boxes"] = masks_to_boxes(masks) + + target['size'] = 
torch.as_tensor([int(h), int(w)]) + target['orig_size'] = torch.as_tensor([int(h), int(w)]) + if "segments_info" in ann_info: + for name in ['iscrowd', 'area']: + target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']]) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target + + def __len__(self): + return len(self.coco['images']) + + def get_height_and_width(self, idx): + img_info = self.coco['images'][idx] + height = img_info['height'] + width = img_info['width'] + return height, width + + +def build(image_set, args): + img_folder_root = Path(args.coco_path) + ann_folder_root = Path(args.coco_panoptic_path) + assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist' + assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist' + mode = 'panoptic' + PATHS = { + "train": ("train2017", Path("annotations") / f'{mode}_train2017.json'), + "val": ("val2017", Path("annotations") / f'{mode}_val2017.json'), + } + + img_folder, ann_file = PATHS[image_set] + img_folder_path = img_folder_root / img_folder + ann_folder = ann_folder_root / f'{mode}_{img_folder}' + ann_file = ann_folder_root / ann_file + + dataset = CocoPanoptic(img_folder_path, ann_folder, ann_file, + transforms=make_coco_transforms(image_set), return_masks=args.masks) + + return dataset diff --git a/Deformable-DETR/datasets/data_prefetcher.py b/Deformable-DETR/datasets/data_prefetcher.py new file mode 100644 index 0000000..7d28d9f --- /dev/null +++ b/Deformable-DETR/datasets/data_prefetcher.py @@ -0,0 +1,70 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. 
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ + +import torch + +def to_cuda(samples, targets, device): + samples = samples.to(device, non_blocking=True) + targets = [{k: v.to(device, non_blocking=True) for k, v in t.items()} for t in targets] + return samples, targets + +class data_prefetcher(): + def __init__(self, loader, device, prefetch=True): + self.loader = iter(loader) + self.prefetch = prefetch + self.device = device + if prefetch: + self.stream = torch.cuda.Stream() + self.preload() + + def preload(self): + try: + self.next_samples, self.next_targets = next(self.loader) + except StopIteration: + self.next_samples = None + self.next_targets = None + return + # if record_stream() doesn't work, another option is to make sure device inputs are created + # on the main stream. + # self.next_input_gpu = torch.empty_like(self.next_input, device='cuda') + # self.next_target_gpu = torch.empty_like(self.next_target, device='cuda') + # Need to make sure the memory allocated for next_* is not still in use by the main stream + # at the time we start copying to next_*: + # self.stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(self.stream): + self.next_samples, self.next_targets = to_cuda(self.next_samples, self.next_targets, self.device) + # more code for the alternative if record_stream() doesn't work: + # copy_ will record the use of the pinned source tensor in this side stream. + # self.next_input_gpu.copy_(self.next_input, non_blocking=True) + # self.next_target_gpu.copy_(self.next_target, non_blocking=True) + # self.next_input = self.next_input_gpu + # self.next_target = self.next_target_gpu + + # With Amp, it isn't necessary to manually convert data to half. 
+ # if args.fp16: + # self.next_input = self.next_input.half() + # else: + + def next(self): + if self.prefetch: + torch.cuda.current_stream().wait_stream(self.stream) + samples = self.next_samples + targets = self.next_targets + if samples is not None: + samples.record_stream(torch.cuda.current_stream()) + if targets is not None: + for t in targets: + for k, v in t.items(): + v.record_stream(torch.cuda.current_stream()) + self.preload() + else: + try: + samples, targets = next(self.loader) + samples, targets = to_cuda(samples, targets, self.device) + except StopIteration: + samples = None + targets = None + return samples, targets diff --git a/Deformable-DETR/datasets/panoptic_eval.py b/Deformable-DETR/datasets/panoptic_eval.py new file mode 100644 index 0000000..0dabffd --- /dev/null +++ b/Deformable-DETR/datasets/panoptic_eval.py @@ -0,0 +1,52 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +# ------------------------------------------------------------------------ + +import json +import os + +import util.misc as utils + +try: + from panopticapi.evaluation import pq_compute +except ImportError: + pass + + +class PanopticEvaluator(object): + def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"): + self.gt_json = ann_file + self.gt_folder = ann_folder + if utils.is_main_process(): + if not os.path.exists(output_dir): + os.mkdir(output_dir) + self.output_dir = output_dir + self.predictions = [] + + def update(self, predictions): + for p in predictions: + with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f: + f.write(p.pop("png_string")) + + self.predictions += predictions + + def synchronize_between_processes(self): + all_predictions = utils.all_gather(self.predictions) + merged_predictions = [] + for p in all_predictions: + merged_predictions += p + self.predictions = merged_predictions + + def summarize(self): + if utils.is_main_process(): + json_data = {"annotations": self.predictions} + predictions_json = os.path.join(self.output_dir, "predictions.json") + with open(predictions_json, "w") as f: + f.write(json.dumps(json_data)) + return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir) + return None diff --git a/Deformable-DETR/datasets/samplers.py b/Deformable-DETR/datasets/samplers.py new file mode 100644 index 0000000..14c0af2 --- /dev/null +++ b/Deformable-DETR/datasets/samplers.py @@ -0,0 +1,139 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. 
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from codes in torch.utils.data.distributed +# ------------------------------------------------------------------------ + +import os +import math +import torch +import torch.distributed as dist +from torch.utils.data.sampler import Sampler + + +class DistributedSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + .. note:: + Dataset is assumed to be of constant size. + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. 
+ """ + + def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + + def __iter__(self): + if self.shuffle: + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + indices += indices[: (self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset : offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch + + +class NodeDistributedSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + .. note:: + Dataset is assumed to be of constant size. + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. 
+ rank (optional): Rank of the current process within num_replicas. + """ + + def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + if local_rank is None: + local_rank = int(os.environ.get('LOCAL_RANK', 0)) + if local_size is None: + local_size = int(os.environ.get('LOCAL_SIZE', 1)) + self.dataset = dataset + self.shuffle = shuffle + self.num_replicas = num_replicas + self.num_parts = local_size + self.rank = rank + self.local_rank = local_rank + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + + self.total_size_parts = self.num_samples * self.num_replicas // self.num_parts + + def __iter__(self): + if self.shuffle: + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + indices = [i for i in indices if i % self.num_parts == self.local_rank] + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size_parts - len(indices))] + assert len(indices) == self.total_size_parts + + # subsample + indices = indices[self.rank // self.num_parts:self.total_size_parts:self.num_replicas // self.num_parts] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch diff --git a/Deformable-DETR/datasets/torchvision_datasets/__init__.py b/Deformable-DETR/datasets/torchvision_datasets/__init__.py new file mode 100644 
index 0000000..162303c --- /dev/null +++ b/Deformable-DETR/datasets/torchvision_datasets/__init__.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ + +from .coco import CocoDetection diff --git a/Deformable-DETR/datasets/torchvision_datasets/coco.py b/Deformable-DETR/datasets/torchvision_datasets/coco.py new file mode 100644 index 0000000..45b5f52 --- /dev/null +++ b/Deformable-DETR/datasets/torchvision_datasets/coco.py @@ -0,0 +1,84 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from torchvision +# ------------------------------------------------------------------------ + +""" +Copy-Paste from torchvision, but add utility of caching images on memory +""" +from torchvision.datasets.vision import VisionDataset +from PIL import Image +import os +import os.path +import tqdm +from io import BytesIO + + +class CocoDetection(VisionDataset): + """`MS Coco Detection `_ Dataset. + Args: + root (string): Root directory where images are downloaded to. + annFile (string): Path to json annotation file. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. 
+ """ + + def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None, + cache_mode=False, local_rank=0, local_size=1): + super(CocoDetection, self).__init__(root, transforms, transform, target_transform) + from pycocotools.coco import COCO + self.coco = COCO(annFile) + self.ids = list(sorted(self.coco.imgs.keys())) + self.cache_mode = cache_mode + self.local_rank = local_rank + self.local_size = local_size + if cache_mode: + self.cache = {} + self.cache_images() + + def cache_images(self): + self.cache = {} + for index, img_id in zip(tqdm.trange(len(self.ids)), self.ids): + if index % self.local_size != self.local_rank: + continue + path = self.coco.loadImgs(img_id)[0]['file_name'] + with open(os.path.join(self.root, path), 'rb') as f: + self.cache[path] = f.read() + + def get_image(self, path): + if self.cache_mode: + if path not in self.cache.keys(): + with open(os.path.join(self.root, path), 'rb') as f: + self.cache[path] = f.read() + return Image.open(BytesIO(self.cache[path])).convert('RGB') + return Image.open(os.path.join(self.root, path)).convert('RGB') + + def __getitem__(self, index): + """ + Args: + index (int): Index + Returns: + tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``. + """ + coco = self.coco + img_id = self.ids[index] + ann_ids = coco.getAnnIds(imgIds=img_id) + target = coco.loadAnns(ann_ids) + + path = coco.loadImgs(img_id)[0]['file_name'] + + img = self.get_image(path) + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target + + def __len__(self): + return len(self.ids) diff --git a/Deformable-DETR/datasets/transforms.py b/Deformable-DETR/datasets/transforms.py new file mode 100644 index 0000000..8f4baeb --- /dev/null +++ b/Deformable-DETR/datasets/transforms.py @@ -0,0 +1,284 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. 
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------

"""
Transforms and data augmentation for both image + bbox.
"""
import random

import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F

from util.box_ops import box_xyxy_to_cxcywh
from util.misc import interpolate


def crop(image, target, region):
    """Crop *image* to *region* = (top, left, height, width) and adjust *target*.

    Boxes/masks are translated and clipped to the crop; elements whose box
    (or mask, when no boxes) has zero remaining area are dropped.
    """
    cropped_image = F.crop(image, *region)

    target = target.copy()
    i, j, h, w = region

    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])

    fields = ["labels", "area", "iscrowd"]

    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # shift into crop coordinates, then clip to [0, w] x [0, h]
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")

    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target['masks'] = target['masks'][:, i:i + h, j:j + w]
        fields.append("masks")

    # remove elements for which the boxes or masks that have zero area
    if "boxes" in target or "masks" in target:
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target['boxes'].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target['masks'].flatten(1).any(1)

        for field in fields:
            target[field] = target[field][keep]

    return cropped_image, target


def hflip(image, target):
    """Horizontally flip *image* and mirror boxes/masks in *target*."""
    flipped_image = F.hflip(image)

    w, h = image.size

    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        # swap x1/x2 and reflect them about the image width
        boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
        target["boxes"] = boxes

    if "masks" in target:
        target['masks'] = target['masks'].flip(-1)

    return flipped_image, target


def resize(image, target, size, max_size=None):
    """Resize *image* (and rescale *target*) to *size*.

    *size* is either a scalar shorter-side length (aspect ratio preserved,
    optionally capped so the longer side does not exceed *max_size*) or an
    explicit (w, h) pair.
    """

    def get_size_with_aspect_ratio(image_size, size, max_size=None):
        w, h = image_size
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(round(max_size * min_original_size / max_original_size))

        if (w <= h and w == size) or (h <= w and h == size):
            return (h, w)

        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)

        return (oh, ow)

    def get_size(image_size, size, max_size=None):
        if isinstance(size, (list, tuple)):
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)

    size = get_size(image.size, size, max_size)
    rescaled_image = F.resize(image, size)

    if target is None:
        return rescaled_image, None

    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
    ratio_width, ratio_height = ratios

    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
        target["boxes"] = scaled_boxes

    if "area" in target:
        area = target["area"]
        scaled_area = area * (ratio_width * ratio_height)
        target["area"] = scaled_area

    h, w = size
    target["size"] = torch.tensor([h, w])

    if "masks" in target:
        target['masks'] = interpolate(
            target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5

    return rescaled_image, target


def pad(image, target, padding):
    """Pad *image* by ``padding = (pad_x, pad_y)`` pixels on the bottom/right."""
    # assumes that we only pad on the bottom right corners
    padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
    if target is None:
        return padded_image, None
    target = target.copy()
    # BUGFIX: record the padded size as (h, w). The previous code sliced the
    # PIL image itself (`padded_image[::-1]`), which raises a TypeError;
    # PIL's `.size` is (w, h), reversed here to match crop()/resize().
    target["size"] = torch.tensor(padded_image.size[::-1])
    if "masks" in target:
        target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1]))
    return padded_image, target


class RandomCrop(object):
    """Crop a random region of fixed ``size`` = (h, w)."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        region = T.RandomCrop.get_params(img, self.size)
        return crop(img, target, region)


class RandomSizeCrop(object):
    """Crop a random region whose sides are drawn from [min_size, max_size]."""

    def __init__(self, min_size: int, max_size: int):
        self.min_size = min_size
        self.max_size = max_size

    def __call__(self, img: PIL.Image.Image, target: dict):
        w = random.randint(self.min_size, min(img.width, self.max_size))
        h = random.randint(self.min_size, min(img.height, self.max_size))
        region = T.RandomCrop.get_params(img, [h, w])
        return crop(img, target, region)


class CenterCrop(object):
    """Crop the central region of fixed ``size`` = (h, w)."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        image_width, image_height = img.size
        crop_height, crop_width = self.size
        crop_top = int(round((image_height - crop_height) / 2.))
        crop_left = int(round((image_width - crop_width) / 2.))
        return crop(img, target, (crop_top, crop_left, crop_height, crop_width))


class RandomHorizontalFlip(object):
    """Horizontally flip image and target with probability ``p``."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img, target):
        if random.random() < self.p:
            return hflip(img, target)
        return img, target


class RandomResize(object):
    """Resize to a size chosen uniformly from ``sizes`` (see ``resize``)."""

    def __init__(self, sizes, max_size=None):
        assert isinstance(sizes, (list, tuple))
        self.sizes = sizes
        self.max_size = max_size

    def __call__(self, img, target=None):
        size = random.choice(self.sizes)
        return resize(img, target, size, self.max_size)


class RandomPad(object):
    """Pad bottom/right by independent random amounts in [0, max_pad]."""

    def __init__(self, max_pad):
        self.max_pad = max_pad

    def __call__(self, img, target):
        pad_x = random.randint(0, self.max_pad)
        pad_y = random.randint(0, self.max_pad)
        return pad(img, target, (pad_x, pad_y))


class RandomSelect(object):
    """
    Randomly selects between transforms1 and transforms2,
    with probability p for transforms1 and (1 - p) for transforms2
    """
    def __init__(self, transforms1, transforms2, p=0.5):
        self.transforms1 = transforms1
        self.transforms2 = transforms2
        self.p = p

    def __call__(self, img, target):
        if random.random() < self.p:
            return self.transforms1(img, target)
        return self.transforms2(img, target)


class ToTensor(object):
    """Convert the PIL image to a tensor; target is passed through unchanged."""

    def __call__(self, img, target):
        return F.to_tensor(img), target


class RandomErasing(object):
    """Apply torchvision's RandomErasing to the image only."""

    def __init__(self, *args, **kwargs):
        self.eraser = T.RandomErasing(*args, **kwargs)

    def __call__(self, img, target):
        return self.eraser(img), target


class Normalize(object):
    """Normalize the image and convert boxes to normalized cxcywh format."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, image, target=None):
        image = F.normalize(image, mean=self.mean, std=self.std)
        if target is None:
            return image, None
        target = target.copy()
        h, w = image.shape[-2:]
        if "boxes" in target:
            boxes = target["boxes"]
            boxes = box_xyxy_to_cxcywh(boxes)
            # scale to [0, 1] relative coordinates
            boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
            target["boxes"] = boxes
        return image, target


class Compose(object):
    """Chain (image, target) transforms, applying them in order."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for t in self.transforms:
            image, target = t(image, target)
        return image, target

    def __repr__(self):
        format_string = self.__class__.__name__ + "("
        for t in self.transforms:
            format_string += "\n"
            format_string += "    {0}".format(t)
        format_string += "\n)"
        return format_string
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------

"""
Train and eval functions used in main.py
"""
import math
import os
import sys
from typing import Iterable

import torch

import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator
from datasets.data_prefetcher import data_prefetcher


def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, max_norm: float = 0):
    """Run one training epoch and return the averaged metrics as a dict."""
    model.train()
    criterion.train()

    logger = utils.MetricLogger(delimiter="  ")
    logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    logger.add_meter('grad_norm', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    # Batches come from the CUDA prefetcher, so the logger iterates a bare
    # range of the same length just to drive the progress/printing cadence.
    prefetcher = data_prefetcher(data_loader, device, prefetch=True)
    samples, targets = prefetcher.next()

    for _ in logger.log_every(range(len(data_loader)), print_freq, header):
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        losses = sum(v * weight_dict[k] for k, v in loss_dict.items() if k in weight_dict)

        # Reduce the raw losses across GPUs purely for logging.
        reduced = utils.reduce_dict(loss_dict)
        reduced_scaled = {k: v * weight_dict[k]
                          for k, v in reduced.items() if k in weight_dict}
        reduced_unscaled = {f'{k}_unscaled': v for k, v in reduced.items()}
        loss_value = sum(reduced_scaled.values()).item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        if max_norm > 0:
            grad_total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        else:
            # No clipping requested: just measure the norm for the logs.
            grad_total_norm = utils.get_total_grad_norm(model.parameters(), max_norm)
        optimizer.step()

        logger.update(loss=loss_value, **reduced_scaled, **reduced_unscaled)
        logger.update(class_error=reduced['class_error'])
        logger.update(lr=optimizer.param_groups[0]["lr"])
        logger.update(grad_norm=grad_total_norm)

        samples, targets = prefetcher.next()

    # gather the stats from all processes
    logger.synchronize_between_processes()
    print("Averaged stats:", logger)
    return {k: meter.global_avg for k, meter in logger.meters.items()}


@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
    """Evaluate the model on *data_loader*; returns (stats dict, CocoEvaluator)."""
    model.eval()
    criterion.eval()

    logger = utils.MetricLogger(delimiter="  ")
    logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'

    iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
    coco_evaluator = CocoEvaluator(base_ds, iou_types)

    panoptic_evaluator = None
    if 'panoptic' in postprocessors.keys():
        panoptic_evaluator = PanopticEvaluator(
            data_loader.dataset.ann_file,
            data_loader.dataset.ann_folder,
            output_dir=os.path.join(output_dir, "panoptic_eval"),
        )

    for samples, targets in logger.log_every(data_loader, 10, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict

        # Cross-GPU reduction is only for logging, as in training.
        reduced = utils.reduce_dict(loss_dict)
        reduced_scaled = {k: v * weight_dict[k]
                          for k, v in reduced.items() if k in weight_dict}
        reduced_unscaled = {f'{k}_unscaled': v for k, v in reduced.items()}
        logger.update(loss=sum(reduced_scaled.values()),
                      **reduced_scaled,
                      **reduced_unscaled)
        logger.update(class_error=reduced['class_error'])

        orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        if 'segm' in postprocessors.keys():
            target_sizes = torch.stack([t["size"] for t in targets], dim=0)
            results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
        res = {tgt['image_id'].item(): out for tgt, out in zip(targets, results)}
        if coco_evaluator is not None:
            coco_evaluator.update(res)

        if panoptic_evaluator is not None:
            # NOTE(review): `target_sizes` is only bound when the 'segm'
            # branch ran above; panoptic eval presumes segm is configured.
            res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
            for i, target in enumerate(targets):
                image_id = target["image_id"].item()
                res_pano[i]["image_id"] = image_id
                res_pano[i]["file_name"] = f"{image_id:012d}.png"
            panoptic_evaluator.update(res_pano)

    # gather the stats from all processes
    logger.synchronize_between_processes()
    print("Averaged stats:", logger)
    if coco_evaluator is not None:
        coco_evaluator.synchronize_between_processes()
    if panoptic_evaluator is not None:
        panoptic_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    if coco_evaluator is not None:
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    panoptic_res = panoptic_evaluator.summarize() if panoptic_evaluator is not None else None

    stats = {k: meter.global_avg for k, meter in logger.meters.items()}
    if coco_evaluator is not None:
        if 'bbox' in postprocessors.keys():
            stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
        if 'segm' in postprocessors.keys():
            stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
    if panoptic_res is not None:
        stats['PQ_all'] = panoptic_res["All"]
        stats['PQ_th'] = panoptic_res["Things"]
        stats['PQ_st'] = panoptic_res["Stuff"]
    return stats, coco_evaluator
za~DW#SuW$tWeG*i5Fa&&-)Kt`bA~gNDjlrJ!CpgwTGD3@fBwuB#?Lu+fxH$CRQ{)rE46v zr;k0Ueg*b?Xt{#iXIDr~IV!J4vUPQ1qG7bzwr4-|4vlki+~roP#$HYQ@H5Th?!Wcd zHULHbI*F>M`k=Yl1L84U=rE6am7q_{7dwQeK83}3`ItJA-J`qk7S&?eWhW-SCh}#1 zlcvmk8N4w(4-w>w{SYt^jS<{+P%C)gKYL5>iKh4P-mzWv2HVh6!&QWOqZYytCdBM* zXW4^_nv{t`bB`7yg(L=kg*msoM|xW7DCXi5M%JaDtJPu$u7f~C{NIvRRaU=M0M))e zZ6fkMUkaZu4RH}T^c9;0sNBNaqH~@|<8^ofn>I@23vQiXW*hvdnv@cvYuap>Bm~rm z{a>~>!m)b{e(?!AdWo@-yht&X|JopzOYJuuUAf(#iu;ZvW>Bwd5Skw+?*!n5va%&FYR&l(IR6FlSjz6ucSd zk*Yg6{J2Iw;DFLf2%e|dMZ8+ElVYYhCG=Q3)jtKZXVnfYz>^1roL&(tkPNmkW-r1-?yB2 z92fXDrc{EAftKbwD#4FI&c(2fxN=~vPZCd1HCfG>8G|UyZ}HgWxlj8*FN{}%x7?-z zf6FT;1+|`BO4-Oa8_Q?=btqG@^5x+pjiU;1aT|*nPn5w(n_6!G?v;M@r31g&Rdn5A zkUvH9fAQ4wTDebqn->4=wE##v^Oty@P&Ee6_X@>wV6^RpE`K=3cCH+>cS2XtOe8Gb4o`NiQbbk$5v$~gV@USFoCrl!VaJLpZ}%-JD!|T@En85l>b+IerJpBN9~_PWaaTk9wpYwbn10H0tB*&o24eD#t9N;dBWn(@dX*LPv<6x+p2?-t)V_wK zwgT^bK3I!b%MQW4`6tP4`NKQ~^|tg`E|De&_7Coz48~Kzp7gjCdLa66w;mc_sfn&0 zWzDhkD#Hh6UuA6|LOc}p)>qV*=Nr7n%vhlIGM^K?e`{J04Cb+VVyU*Hv(d7uET4JK z>H+dOR&w>XcQ_O-IJYw4N;8?}r{$_b#*VFcVs`ZAl}^>PyecH{yo6rx|)FG7TZ#QGaCm5iH>IWPSmwUuizt0rIN z&c5Ey;dgqwTax#$0N`Xg>*Uv8<&QVOr$|$LdmimBbx>rKo**V-n7<3h|k zEoVwk@p*&!tTHWhlEF#3ZU48jNOs?gm1p={dmbwRmuYSk_4OIfvF?dpv(Quq6~{N(bBGgS4*^z4}+n zyHLu`uhRLKs6_34ld7tv;pj>7+Bc<8*^dovAq%G}M^gg9ZJ{fjsmB&15QtP?xZBw6LHTS8w`*xPrhxERzQombR9WYIOI3sk$tqxb^LN;^)b;)0W-+7uU^Xp4MIQ z5$wC3=x~cvP$nlS!J;`HkoOgID0MMQACT~WODJJJX!_L{yGUVuNM;Bg{$fso-WLBhm!k47$?2)3~Vtm5xTwqzBu92Bb1w`kYbmoU;&g_9)UlE0P+? 
z-NnQrQi9g~d)q4BbBUk1r2-COe7C_7RvF`>QMz{cci=}y$;*>E{o3H1oLTWconD6@ zMW&Tq8=raGsr++dS{Wf4>I`osn2EZN_1(X3&)c-e*1Q+Qu7WkWHQ^QzJzuYc@?+B# z6)0S3B$bk+>=)a)YG)%|Amz(EBjE|2sF!HLw^`NOrC^(Mjcf!}5YL7X#t0*yxFXr7 z)@u>lw0l4K<74q>5-#;DM>elUOPSTm!K`UP&>zR|2Ko8^@=xeXwR@W7Hm)z@4Z@MOf<6DP=;o$8Kr7hHTE(E-8 z!K-b9ZBz<5xBhOG6AGjK$Rqu_9);=AN)BVo&ec8RS6h_qS?OM=20MMR%8gDNg<8LO z$--Tf&she(OypX)T65biiELPt+$Uk37sU4u5?&;w`N~3Kczh5L57IO28p>f>s(hF> zo^?ZZxHA`F1R#*V;4QVYk?8oSNzdVj|1Fe{4y??AjOo%4jP>1S3LWyFbOLeF?{L)}ea|n|^AI6AQXouq zODbVu;7A_7xyTt!M3eQ10tK`?w^HhtTRM8iZv?v>pn4Q+1GzmI6#pmB$a7X=_&ibS z3o_O#KYytZUnm;BBXCO<;5P!yK3RiX^LPJ zlra(Memuo5!|0FSUd|hbYG`QS!%jnlPK9o))vJhX$d$g(3=WXaV!ZA$>a-4>FyN$Z zXuP*}A-e|@hE%(8i=@AO&MP;b@+rt+#UlE^mbaG(T^`KqY`S{6tdr1+{+8MF*D7N@8&hHPC4m( z{`55~cDvO**W{+0S;i`Nup9;n4Sw#|5x7x9_%onz*%;QkYSsZ{F&xj+T@D;ddv~S( zT}bvi6;8G9cZh2x*gWVH+)n$I1Rn4$&Ym^Y=X_Cg<4)Z0CZ^umQB0CXes}L#_#(c? z;j(l=xFaN3W682;g8Hy}Oap@9+TSZdPj=|pLaY(S^p9NTgriQuV$PsdBMutK4zW~0 zb2Hg8?-{F_wXhtf+`{;>wpVXIC=y8h@qA_ZcH-l`G@LROw ztjMq=UF1w$AOG}8V)D5FZ8~=Fb){?XKFQ2z89%Fo`GXdio8nOcrc*q*gfy_H#v?0s z{OK%H<^Xf4fgQO%`KH1Z26m~IQi2(G=3B{smYuh%)kG^qECmrttB59MKQ5+wU0`vS zKNStBugtjy4V9>pvWpKR`TxmYC!R+m)<2>-`8VV&d+_DuU%u~%txPJ@s5{9o4LJw( zi^SZh5-rv>p`0c&!S)DptrR~ATX+cGbcQ>MDXLu{R(fHa$aa;AI8^dEPPW%kBg!7T zV~`}Sedla^4>OK10{)ovAA?k6SQkuG^1LA4cS-DN`XsA%;4tz#dZostw> zN}b0(%hnZRX@HR*-*64n(dEewzs#3}L6I|<=%jAP9DHLz)}Y;x*V&JTXSWLCK{SG{ z>dy2mvj}GyyGHMCG4OmOoc-3yZ9YJTtlf=c2cV`zLIUtPfAbL*R~*T*v*xFut22%h?U6fTJHB@>jA`d%$k{TM{1cJ4 zT%@*~pQ^}6IALop)106n@0<5^MhT!3x{g*78?gPBJ=8zI(#Z3b&b^s>xu18^Rjf_P z#3qifg^3T}mOF|S!-0LcH6L2k2J9#7RBpB=&jq4OKB$n(n|r?3-NT+sy^9{#mzd0+ zRrYuyxI%kZ(G#1KV)uEWsLbNjhS^@#>5Oj^32t$*+0GGaCh5ecQpYbEbIE!jkg3v7 z_T=fE;KPyXq49ao)u1o5!MXZ$VS2UvMFX&Fe;$xK^Z5ze+M6w*v@iMSeLaptA9 zdPV>IcxQ7uJ6fp*H4){m?J4*Vdjotw+tg;JB}t7kD^Br}gq^)}t4}OAuc4J_C0P47 zyyi}Z{}JSOlXMr-a*PNmIA!?91Jl8Yh9g3w(zgoi-hzWU43UM<*mu#}9OXh-mb1_1;*2>I*yKE>I02pT6AM&$DQ z=x8covU<$$K$!*hJr;ge8foo`YBS=8TFO2B*tw=>cj2sQ>4sI;m-|uK2Ox#(oU9@% 
z9rB7rH%Z&Nue!`*O$rM3qHDPJxh@A)=cZN-kp*gD#%&0p)KHDZ#kDscyEJUr{<|t& zPY3OvuYr(E_q#av7qXw2XxdJMyPMCP-VOr!|75~)vZ4_PsdVDo;X1EooTNO+;sN8k z%FK(slA}VZjkv~trxR=NnTBQSCrtpSJx)YSRGYMs~95( zh$E}vVdSpWoud|NrHz#EtB~hZdrc+w^d!rs!#gde z<;B_Z-IAS!UUv`u>y@G$$^QGG;78YA(2>?VL?QJ(zmHV!(|w~pX`z!-e~}G)gGsZC z&d$VGig+I5%Yg^wA8ARfH=Q}2TvmT|=KEwHEi=q!W;GDau{|o;l;kt3jXEmXI{v1@ z-)ZvRYjhxr>6@>r;q_h*r;@y{$Ht1K0LC zv_&}kXy$)$`vg({=5+mE<(DJ5SPsE9d z!nf9)^NUcqRnLjS<+;T!%6;iqmyox_r?6-Z875B3m@{6#!D{}_Q+7%G^{QXh56=VH zJDFC<;4~q?MqHX4&QQ8u7SNi%c;E^&f$}>g`Fm`7yJl z{Ebq3VywXHq2E?+ACFP0F@C(kGl;lXJwg^Jmln#zSk;f;$K+P5PAZFAEf)EWzo&{jHH_B+1+IOo7aM6p6GsvkEFkD@TAkk+;>6rt%f;!9FN2OB1PH(cgJ~P(a9@e4~}Wq_R$B$}x~C zQZ?U$*8v%A3)q(PY_h(DqubvJC|Im& zx%Q_Dc|=}NzSm6s$blW2LIt}Z7onzdxtE8f^V*&eGUeFHpJbm}cC7YOqME+-1zmBe zfS(A|l7^r(wl6R-Mz#>2#>9&(ORtG+Q#!#Wl$an;qM57}S(Q{v@lAIPc#uEL$@$h< za&suXV8nsdUQ`pSq8URQHR*kyNGpHn_jZa}hzRw9CAK+cs(29L?IWc>Y;^AC`vNqt z_Nl)vzMUA()lD*RF>5b+4TtpI6~Q7l1hm-UanrZb!kzh^Y8pd~p_+0_mg0HJrf~sE zg4gtC@K<;;VBco-9~kV^#=IYFjcr@V6EKER0qL;bq#z} z!1N43pT!8&)WjFoR1p)xO(4{Uo?>`wWc_S!gN5TwO=Bfk;$2Qika((1iv=x)-&O_0@spxFjZ{e)(=l>00=RCiJ z-Ft2VQL9!h7AQa;5j3(=KN55I$!vAsoyovLkOj-yRKDMVQthV(1#CePS4Kqm{dsG(#o#oJD0DhX(uLW2A`?(65E1| zVDZt^QsQ%4Ukguim>I$)_=POW^1V0^2t8E=?bkJw`m69_F^BVNl^+N<7|nd=>|q?U z?oiKdqQt3vMf{FST}FF3l1LtT}GT_H;yh zy0JR@T+-RQ?&Hu3p)D||Q;E@46{h=E{T|x>)V9yoJ9sqtz2rYV(dYym1swOt-aqj%M%saWG&g<@$(_<$ZiQ%Uy8n7~xB%7S=G&ui_Z*^Gl zo=8kyEhQVK{&n6r&3)*M)zbJnNydeH@G~uMsDTL|oA6bCPM;sXmk*g?wJ))#_!n_n z$;Q^ivZx=OI{FlWeQxq@i*;r8+D~@>OvipDO4(>Kf@FO?hmS^V!%|UD(STgQ8kyes z;s7o9?2RE-r@CLe1pkML5ekRD0^JEvJJh(&ru4` zqnzi~Q*}lGx$xLJsVnJ4dj(sf)<4EkgXvfA_085@>O9rAno-OFF_<^|wQZU+O?yXe zkD@I;@I4-*FA-GO^{(ByRd*ec3(e5t0@p zR>Q?AjJ!BEJ0i_*E)LG2I!rh3RfK@9wRd*vZM~PuxhHH(vP=^cu2@f)y8`6v9Zri{ zFM?_Rq{l?Ts1n7w6OdFF@!`%(C(Fj9o9*8Crhvi>J0@H(ut>#5Q}PqHcM+u4Efq_(01PN|_D0lE}DuywWLvpb1Lk)n=h(0d>iFggnq5Yxb$ zl%@^_sM3|u;DO!me?PoS%p%`Rl6dN-aeC#7S)&YEf=kv>Mij1Ij0nQhq_q9~H4=16_k&$8OKgI5zJc(xcZC2X8103yolir9O@pf_cvHdD3rAZCn 
z=pP2V8_<03z~1XrHH|$U{PhEidn3Qqz=KeT!)67a`Hd`id@DKM@}ZKX+I#M*yl>AS zu740o0&@6qE0)N)wz${>fB_ZA0u&0UYqKcY&bEx`Zn)|J{c zOCCq44T<;wTx!3j2dduP?z~5~u1-m<7{zRAog^pfm&FZxa7;ODxo zFuqrp7o47Nf44XxE3Z^}`Bc&pI;G_DJuek_%*;yzsRBO}$`AjQar|QSe8?ILPqU#^ zm(9M$et0?;NSP_McoQ0Pm%CVFgZl27EZ1M#1P!HFG&g&XiY8o~JUSi>eo4R19&TJ9 zXst=JtM4e5f4OaV`iN10mf@SRjOeKnm(+y$e(k3QnZYkdY&eC!g`+tJtMyqSuSDIn zVhozST2GHYWj@S))}?iQR_p~((*CQicZN}Ny%{WKY>{KE;5(iMqrnVLC7pD!j&Y{Q zlFFj#U+=?TsF|e?Qwa^%XYD>PWR@cvfF!7O#oVNEk&SeLMzCT!cqz0O2ryd ztmf@!qYgqY+>C% zP>1Tg>|Tj!3s|H*7sZxEsU6;vpk7x@$ZOR@$JorsE_LA`8r+a%LW&|hJWAL6S$sdI zh!H)aK~5)Sup5_y0%s*7OJIZwl8eMRl`zEiw!fPM^Z8#9h5wosSHyPw!pI24WGD})gS zE*I`6LM3rE^`AcaUuIDgcgA*%)T`cnHK$$NF^lRHj`*Oia!cLwGT5%uZ+60Mb>%nz z_v81-0!aanIJ2F!A=dmMAn*Qd!i*9efL0;j?Od9-?U4uC%qavI`S?=&pTJR~Qp!ly7t;Mj)B(bV>Ie98@R^}mIlDv{y7@31g*zR5 z8rZ3lrUrLkWHG$GNw%B*I>-(_6&Yk3yrJ@=rVz0nZ=zJQ-j&+K;$*mDH-b(Fy>NlQ zqh&6429BY(q+t=qkis~={DNWIS5cgAaRn7JTy6(BTPX}kWp$olJFWNeXR+Ir+>A5Rd$^xck@t=_ zmC%+&oAqeQTwY zCJ+r5ZsM)86FC#morv&Z_*mJCD@N+%tgyXSGYNUpz7{SM&4bcp0kRzULAC65DZ)0x zP{&4IbZWvP>D$~gchFL&peZL~BN#qE*13~56x3gzaot)g0A3iqRMuWZG|<2j+3~7h zS%y8|rBi3J#+OT0R)?EK2N)A0>+k$Q4mk8V&b~q=AEjT&=U|S`9!Y=!^n$BgMm3-) zzO1vM{a`RkMI+pqd>Fj(xE=|ygGQ}z{Ji;$`=Frw0p+53FC2>UOn#y*>9i-TwUW?z zF~1ndPu5pf<>13-AQC_#84QtIrxFU_ALM+X8gYq!i+<1LHc|j>XMcOtmXqk&M`)hV zzn4$ZV}Bj0n(@rY~BN!zQ*ta_M2DgY9tQ~QZ`HMS}2<@d$R=CsoXI7LW7jm~;4$ZlwT zK#RSZTFh~{SPf@ zhwBQoZ0_=fYALy#gh#_*tpT zBzPisu6Ft~6fyKBm)88kCOGK_E!UTBA2X}1iPeMa5}Id*rC(o4&YtddNn5^6I>J|r zNfR#JyG5#3_-U|Irl@g-lkNI_eYH%^L0iP*dy0Gb%ZKaIbWI`Kxaxwc>u)Yy(8($H zC1Ha7j2iSz=`~!~YjC*6Q@YCh7$q2`G=zf~(kX4B=BPaPq2Df4Ir=UVYuRr=V4G&*FwO zu#d67?a;&cM(-jpZ~G}+PrV^-5AbV&hf7a@KT%^ntdW+|C=((P6p7_0eQKk@f zPxqgA1XKyV>WbaZj1Cp01Uj9-a!^M0V|&Ixa2adTP+q*st^7LKBFjbPH14eVQSvv~ zokb_6l*WDTjU!cl&kKr9$qv%JAg> zF;%Jn5~`@_cyY1}j^1@8?&5q(0>%Mc?zPvEKis(#wnaZQJ{tJ?e2uPiT7&F4E7MWL zohLH4E`wLp-&?VFcdY_NB+Iz|pmW`E0?lwc0wU2WIovAdFnl($MeDUPnXxAOK#)cn zHRhxbvkHz$QlhrClT|{5l!hwiM{Cw%=B9rat-v0@5+{k229^6wD6uWx?TjK4{$QMk 
z_cBj$xE`f(Qhm44E)jaIKrmaojn_sB9mg&UYFMpc2&#~lLeDhjB%WXQpS=R%W5dy_^PDa;T(DH(RiQ_k)z%LlLe{L! zUF~fQ;he4JKXV_O7Vvz6hrEqZq*@#`c2baA->xR;0$q{&YSn>+D&vqdHt8iRi)mez zff8kM6L8_oVIA)((LyvS5arxMli;VIT@eDL{=TH3_NBGp z9Bn#|Cvdl9WGx0X#@b1^t0_D>L~4tO8d36yhe5nFF1PO8zIxlGg5KlDue|gN3W81p z=qj7|wxwz?aAkaWX;@#a!9rT){Jy-%TXus<=XCM}-eL;us$&VMFiN;3z7n22aqOzpBD+ zVv+;CHYZP5?}KLCiCI70e$KT;4)CQ1=?y5p^Z?8DaYRhDXpulYEUqFibvBqwQKoxH z<`k(K_gMvrL(JFT2(%EbtNeE)-f^zL1|7JMNWAiAqP*)1&=`E{XZA^wc7pNm(9PoS zwRN!S^Og!ud_~~H(979uWu%~8!^l37e4yo}x{~k#^9m|A#PL*@gKx(rUsMlbRLQ$A z=7S{Bm8iB(U1#X%?8w zaC3+abT0pfhxlB67M==k%J=geM??ZJkmH4C3DU%351$x{nXnYuar2vWNu1ujqVhz0 zqiVpZka1fMF?j-M9WToNPVGy(m}F`IHt%pdEy9e#o}3nvAG1QiM12<@i}pkg6^7r$ zzX~X1Pin+4lVaPw>5pQVtyY8NMfF?8^29~;pb9x#9BXB5pCs>?%Kum&DQUb~#fhR= zi5%iR36Q%I4d4sawgOPdm4-po3`wS&3#$Y_hL+@qN}Wd#s$&L=Tptu=@s+@Ax@$be zFRWI5b&IM?v^ok&JqGgGy9K}aib{G^`<^^<2SyN}Rp`J?Gn}k36sEGZ*$%P+UD(e<hxvlXT%sQuy=_1Xb4*#~70qS{G$Kl%0vz73 z@UVecb5T7PD%IL>wR#j#q+lyda`fBuzSSihz7=RL;Rn4ZY{5X^>WZ^-j>(Gbom_r? zveCe!WLrceQ__0h?hw{#sD>DQ&?@F?&gjI5v==G~pps%bX{19FZH|Gi=X*b75I<=` zhN_NrsNZ%z19~5~W!aZND%hCsb|T-!3JIWIRrsiwX-B>q`1c9_Wa9H3xI?4^7>}q!Ct%hr zD>Ec{w{6_Sz`$KLh6nT_VvDqn))hw)$;cV?R|VyY)qBj`u2tWi$a^X5aue+KqW((g ze35lq15jR-1+p&*lFT*h=TnHM4Hyk%ti=P#ezNg8t_Ma);RT%mSjWE`8|-du&LwE$ zfU>{pohcN+qLwHC?Cre!sCYZjiCx@pV^-P3zM(K*)idiN0SA*V?(FcU?_D4^$_)uk zRb4Unl&qm-96LUrE79({28$!ScdheE9-*z$UxT)8ULo@>O6&|~|LNOV3RlX=T?RK@ zmaZB#$tgvPh0;=WbY>?pRSmG4c8ADEv#*T@!F@upf7_?$x$oQ}J!2R6CLLfo|7p zUE!d#Q~#2QN4t*T6>ryTH#dCEv&c!P@QF+S>JYnWIp6vkl=mAwLm;gcFbHV+uq7`_ zn%vLnsRMggHx*$eCp)6+uR>z0mmg*gcH@q$E*iK&X}EKNrUj4H<{3->(X>Lb=TLpU zek@!s{~r0+i8FtO@zW*>7wL@R9<|tk?QH|ck#J?WBr_jBi3qhyhLDI=8!1ucX5=E9 zwoP?gg9@0g(uI+`Jlw@bJ5B$T0L=W%u77oY2&j_JqA1+lq_25PxX@cSc||2ju2CnQ zqh@lXk#JAI$)qJUCE<4fb@|X>N%SO2x)dvR7V^B>k$Iu=gXZ)Sk_Qw4`FBBGOp)U4 zl7u^q*)`WCKTk5OY2y1)Bh=yjf5?6nO1iiQPXc?h5J4LAR#Skp<{ZxA*~eeo z%IgS;h60VYcnxtz(W#rQ@~}cexrim5tcmZpX}H_?rBoD~96V&*0y| ztkVlBXuM94-{}tP{y)IK(@XC!j&cSm4d%H1s@fa;G@?|CJjlrtw*(A+g$ZsBYS`6# 
z#OA;nE4t0hPVLE;&I>EIO)vBl8p(puz+~tlz%$^J@lBn(g(#lgxc!r6)JIlb{RL`{ z_wv=JtODz#`6@A;yD>2s%0tqK%v!eJ1{zgA*e&&f&?3hse!%2G2LnR8g!w^V>9fBg zXX$<7{dWD>sBKi`QY$C&_n7T8aJq)F^(iR3ry0#cqDAaSUk*)-sMT3$lhaUbO0`u= z$n!VgI|zt78eKvjNHPA(eo9K9_|#0EAJ9DW45}CyNE=!LsK!7eV18&0JGObgM?cSZ zNH7*Ul|Mo$SNcK*T*E2c8oS+?Wo@Q#&4`)w$~4cK)4lpojJ~%Zr;1$k8!V)8Ce#DB z*k3st)RImRh1(eY@faz1^o{+U&)4yT+_umS?>D^-5H0($JIxB^Pb*(|x1Ro{3{Z|* zn#r{k_A>HjTZ%)-_lD&q%c|Ak#^_z^E0aa{Iq{Z3YZqtCHmCQ@3x4kI=lEXxZy}tD zrIy*MYS#I1=bYW$G^sMLY zb4i1j#Wc|(OL8dT-eZLXrjvESFt*C=o8K!@s>}0=qkJ^9&nxx$Oi!!SXRnpNa(8zF z{LE?Ti9#k^bTW8`mS-Ea6gb2wavYWRk`W|YsRSV=21fUR!qj1BfJ)!N!<&^pUt>%$ zbA$V72P6vz#D;#|^@7g9r7nxrlu3>G%-yO$2-UNL(!s(W_j4q^0-mZi#{mBZmKg3h z%2T|;9?k#5UIA$g4yGHeuVmIwVCO?L+-~uL+Q;7P1%Yfg(O0Pu;#B%>n--gEFGHX; z{t`{#XnIqzVs4et^_{C0M$V9Yx<@~()o532qLPxT$q?oD3(~jDTkIje)%T5za_{RD z>0ZM(<~xwR@*H#GQS0;b^OoCN(1x}xrte&z`;AFJViCeX5B;`ZZukswV7JlDxeF;*9i8~@L3oQ zFsbu+pHwlMtuQ}EeTuLbUHJ&~m%ISFKV*b9%TMupHd%CCJ zSKr$DwI`jQk$)f*;cTSWR=qx~oJB9S%6vX33R3RgazKPn@xtnWSOm*ZQPx*nvTA$> zeS7N%+&%*4m6naswdzs*dtTy)0)Yaw#FoEB2YREE2J#eGj_dj2l5Iq*0B6rx0FHTD z!zFPj0Dh@P(fNz9!huL~Pa_ceBY^=n5Xk577Cih#3|GOE5^451^vAGsB0nu4bY*`0 z+bHaOgCeRZZPtEfydwew#W(en@lh)_Enn@X5A%YQQCl|gtp;V`#!^`P?)>HKOqA2_37%>>F3T->BU(1kNKP% zS}SJbF?9`kgXGOBGNBF#L3o-prICy|x1L+z3<6aE&APujZ5`^0KO7v9_EcYs;ba5; zDDZ3(`igCf)w{e(EIhj3WI_P=*spl>^J7NTn93u_g`PSS&E;$9pLdR?Q&!mV-8iYh zCnDe{tYH@L8o;IjF#ei9egibI)b^Z)zZLK){PhWRCJ-o< z@qVt{w1)Vtx&iib(wX1alYB!)eaVU~@%1P*kZe933>Y4$WJ{;c2Y{*CFxH@UGuQ{8 zGVT-$Gz2_i%31qKO0^E~{`c$Oz_A{$)90_wvctMP;>94b(A{mRTD7zG*XzRh@Jygf zpb5}k$rj-qLMnoBqJc{iHP+f|!_i8jDFj5+lW=DtVCAj9_rd2cAp`tN7iM#L9CS6a z{CZUYcS1MxW{Ur@O|2N6+hVrzn+kz4P6$g?zdoKCRAS2cd6(P8|Gikp!&!&uhXAhc z+_adXIDgLg_;}u!$>tzI*wj`fIFS2%nQ6kY&<#1Ha8g>lvS8t{DIhC-%mdO^-IDYF zp~&#Ds1mNowFrC%zSnT<=%-u-XOHsN9fPQbNwXFk5Gd@{mUa1^J*TFH+kkoQuA?si zG<-u2u;B`S8T**U%`2fDvSn$65<>(wQyXdk1h~xZh%a^`ih;9dy+=NpGY>(+tpJ3# zx$uAT@sY+J#uYI=)=B3}2YOPOhm65NH$Of%lL2O}S}gAmd9G#O+30^NYjMfpabDgo 
zZq9DIHu{a<{(B$tMjDCzHm66Pn-dmthz)RlTJLFVaq_`#y`d9z#c&dP!?F`KEzkR3 zf#svg1$I_~F; zU|=6DKm+<_Q1*2R1$#h;S7K&{(<#8Ozw8)%z7X?YiOF)W8o9wHBmEBeoMd)(TI60Q zXN-bLqiaVSG zFftmz6~uqXD!m~t`G7#$cebeXp%|{etT+mL;4TP)hYnY4I@F`2gxmI?kU;MmFUVBu zUrWt-7foH}oK-|JjAljpBc}xeV=x}!0NTvC{D*l+;dgA$Aw?V@*ALjjZ2$_}>c0Jo z?=%QCw)_29k9Ipyej0!wC&{zOuXPk+k?Dge3UqEWh8#i~KSlsD262B~d~Jg(Z*ITb zBoKD{^apWT@U+br75b+=fG)5)c~5;CsGo5g>(912T`0b=eE8h7H`z5!o|Y52KcW!LEp* zBl!=>i_k(^?;&fo>evAhrOj*o_U$G0&EBp*H*xlvo_hSQU4U1HkQR!Gmg2y}3+ZE2 zi@Ofjri*ECJnREFfgV?ZU%&plUvYL%Q@-yPo?&2Ix?F3D$J5-?J_dV|FntU#+L2#n z`L}7OfOiq<`K(Y$o84OLx>u_y?qEI6g({SV8rAy?50;NMyzUn^RRWv@QEs0*eAkPjRoA? z#YwbLZ8Y^;Gra>CZwD?}ItUqZ_$<|peCH8~o%AK|KB9!aU4Jby00c^-{xWT#y9{Z) z+6b4VC8e4!{%V8*+Tk{*Z39lINA@v&{g;LayXt0uiCOCo2MVk2ag+efcID;IM?F4O zWB@OA{PnoS+q8_Nul_Bpq8R6DcoB>iEed3^_0AGwV{uvB-&(gpDXHQ)!E<~jCVt)dwQ7{nC3MA2qz>KyDWXFfBX_(d&gR&ABOFTY;72}Tc>AWQ7&$Xr>xu;$fsw&NKZX8Sc-G($==W(pQ3#Y zr^Pn`#C*OIa^cvCs@$_6(Aec)w@lmkVO{Yhi3iMMA?!x6U40_QHh1yg4>@~oM$g;R zSNnD=Wx03em>l19vhka)_U|WHu;qiVb8>Rp&ow^Ue+rylgxyc5e~X%y`gx+yTTi5b zj2&1~R1DhWgTq?4xSy_>t40(crJm2=)&oOZNA#0xbA~q$2Bf9`R*D|hv4zy0qDwc! z(TBPsBIyBVA%>pnp1iq|_RU{D33MBqGl-g&2y`MF@HKd$n>S&QDlk;38~7g7F5Bf^ z>Atf3{&1YBjDwk|651YFW3vTZT6z_i`HY_tiiK{M7qKqN7~LHBtnQ>mIM=$O3!=?H zE@EvOi}unN0*42J{pNQ~7;o6;TJNrYCq zc?|Y26Tq`Q9U1!3V5-tR_c==FI%)Q^V?O7R$bbGGbu6Qho?aYMSO9y6eO!-|m@J0T zb+Z#rh8btsCFbLI`bexdWc6qnZ4vPbA1&b4p4vAr#QK&L^ZdaeaY0~ktzh#vrZ;&b z&}pikC;8we(7N9Q;zxu#`^ay4owI+z#Q53pke;KHXR0qZSozOW;CcKA9lou&%- z{g0Td$&noPqXTEl4#z^k5r{VGwP{L1)ud)YD6{}*g}XS@7~S+lRZ-zeftxX41TF$U zAEe0(DlQWackUAe3{AUS9Kaw8y=U&oT$m^3Z1Ti}Oz@G(opA_ZAqvg7V`30FQ(-bO zAO-Mq>ydpLlGZ}wMU^Wtakx$JJnsL4zIpF@HXF)7x8)qk1rdWNwWdy?xlI|A0hAP! 
zrT+HO4-3<}%t#C1HHCR~K_panr3fsIsETuxeu)Osw|uKwX}+9*fd-?^mb%)Up$6B< zbw9OH4tcqwlq>FdBlZ7q_nu)*W!?KQqa!*ZqJxM^8^?ku#X=7d6$KFimEJ|9hEN1T z4N(zM5l~T30z|1&LrFpjAp!y_LZm|i2}KAcAwWndA<6&X%sjvOJ=g#J@Lt#Z>CG1o zaB}uOXRp22+V{HGedvMYSqrow)%9~53TOWE14?hM!9i%!ivsdbK_(MR0_?8;>W6bo zL_1I=qFDC&RH>*_&xhKF9ye@4{R+#OMqKStOoI}}e z{MsfL`h6d57YTz2)>=;8DS-T-fToJ2eMz(A`({Lz{VqD-Gcn+PU`rH2wO!ZbW!Q-A z9Qeuh#zU&AK28q1LwlC2f6kWwJZjjtGcDa)&Ut=2@vs#z;5rlEz!LsKow80}QOoRw z683<6&B@W_@0uk;>FU(g@}j)Ck$SL%2j`7V&j3Ye{hQ$}h4@zVK)PDNb@g2c4gYQR zCsk#MR9m5Z`SCc4>YVPAcKkd6p;Gm9s#&2~WZ0+PhrI`dy9jqp>zlIwmubIN>vuRH zJC{w4o!BO}|5e?VkE5D)4<;P2cFoV#OO1*i+zgdy?pqCq+r_0EmWa?VS)Mu8kxVQI zs?B|F*Q?@VAobkWA)Oe0dTv5utLF`b$uCtu>=lC|`U&Ae`Tg;i+ocK?ixA?{^ogfw ztC81!G6Med6#AP+SHGPjDQ|BBn9A1*Pl@dEANk%|5nd%We&LS8Z-!$)yMy7U4@$h6 zaPv7aqpuL4r){mpVKsT49dDtZfsz;J$L93rj;fOC(j540YxO=g+6rm7_=|VYhfAO9 z$!7pHwf{Cl>}hT8_cwrjYt1~;OqzJ_Z-=+x&xLMuwY1GOw;Fy6?-<4`*u12&7a#TP z549AUlcYX9>TvZ=A1n9iNeYv2_5;thYCiAZhIU$~P`GE)o5=l@P+V#uzd7Rx z)R{&b+}oE9OoeHqSVC*X7UzJkjm<7`Ws#*fi*0

ka(sMg1KtrA&Y(1Jwx%BI zMAnZz8fX8K`%ZgTk}cZBIdVGye>d7p*j>opVneW5^!!~fI+U7wzpi*~yLe9abxl9j zKMReDx0gj)g`tw3#+SLBkV#w4if@YCwHA`G<%U#8PCj#DXFSKBin_?m711(KupcmS zqJ{#sY4wMU(EKw#x`12fYb)65X`4gN>HdsC{xi7wZNQSnrjyiuZQZnUy)D41;-`@` zaaki)>d#iCQOv^ANpUkHW5&kc`vHS2VLb86hq}1N;{37uB~P<>tI>7{ z=7sXK@W+Qx$tA#G11ry*>oJ!U$LJC7nwnhZM6`ZhpYHKZ=eqT0(W(`YPMi<{pss6H z?k4wxEG&{Y_X>GoK-xq`xiN^a?_^PG$pd3_1@iU4=e}~6o*HZQ4&RQ`+8wr356GR& zzcTQDFYl$hlJwQH&U#!e6cY(a_LoTN`iXV@?HQzi5@uBu(YCM=>sj5?`(!Op288RB z10tK%Ky_08GKr=Za`xqxQM=@=R~jpJ{sf`^mlpw=>AQ9rixh}_yk!*l%>O5G`0r=g z*SI)b8*GXed57O4o8$Ozn|3Oh$q$co0w9dgTbp*~`D@#>&A-gzB#^+sL~?uqf8`9s zQ2%yNJ?8<+)ziMLpY^rjd|)2#d*F@`7a0LSn}45YIrh(Xq&H6|AK0<&cnXwR+lMoO zUFt8Jc-(*Y2!K^8)So=q8~5qtfK#y9djKZ7=dT4UD!?vh2#84i#+u?LjYCaSdJ(HN zcPGCVv3NPHDe9`?C~InjM4_u(kAK6OCsHfGg*f)Iwbc0aUzpin55wn=2b&AkzKi2Z zmihahet5FUWf<`*2T~8_p^V3AFOC?Ke|$;dZL9U+7jHVjJ>|y<(twrPTp?`OUyUnsg?e9vk`N;JN$ELXt4(RPj_1G7cUy1w@tbmq6V z<>8h!^jXBzdUjb%?@0XOih4$Jd!9rLF_A_cCVogti}cm-shPG| z6pb*;<|c;zqQ4o0*_FHl;*0auGMh7~?f9aIO`C{qsqpBm1s?*;fgm7D1c!V9v8ZOh zguV>ai%6?N{>KBKH}0|U2pua-Fz@qSO86fB$M81?v$zC}*T=WDZN_N>n!70)!1Vdf z{OJ$xS+%JWq#xDvty^RZdJzXk>vDr;1`GV{1rh09R6bTvh>S~iG;^6=Ey;H1{u$G5 z|5+8(hIkUd8%W_&v}};AJFhG|X(U@a0&LOqT62#MwT%bORivI=^LID|6o)(dU)}rjbrWq+{wpxIbt@>*W<9M z>Gs8iKofTdobyx@7rBXgLWefC`cPBicX-oK} zBpoy;3;)vHQ1Rn!82U@bvFG~}EiY@l)+m|!LPt{|@|)53&!FYwyK!k!-Qey%$6_U) z1Fxr(33_4vhrfGcGGc&27cuq%=d!f3)rmA`nEhoUkPGTIo4mhWzT681@9!^jcD(Vj zY1k%pb))yB7B$>`Y;f_rp~SHUAyLR9#;x!}O9c~``GtT}M3CA-$Ks^P*0#>p;k|Qe zF^dWP06H>HYTKI{xT?&apEv-Hi6#mv_seb7*xdo?=8fExM{EQ+0?JI|D;F=%?f6@T z>9qHU)|X_xen3df)9M^V16Y=9*X2vZJKTOXLjEM!<1A3zO{QJa#;#rlE1%)iE z?^iWPz`CFQ^)@A}HGC*@yZL8TUK1$4Y7Z6AItD{=KEB zy6O!UdMTj$&-%d5Y+w*7eXR>k`MqToTW*i+Qd0+trH9$uL}EK`1x#q`!Qw9XI0$_J z2eH$!r**QYltb#NXV16wW88yE-7#A57Z~Nu(E0DMSvO17g>M9o@c&>LP@3P=;*g>` zXq98vxH#KaZ0al`;$PmU-M?sr4dFPnAdcmftN-9f;Wr|g$sAmetVB#`z14~XRF1b` zPj|k60iqO09UwCzMlsyKjWk;pgK?7Ag@nsd@CX01GKXLyeGlOye?GD#o&4pZGj(UE zfHIAJvCqop=;+N}D72e-kkt)|+^ 
zCak$PS?cJ`<@2ALNgq(}&rvn97W=BuOupJt(q~m;2Z-Qx7q^CYsEC|L#2(Y0x6y5w ztttM){@9bcDDbbXKOyR$&IPE*YEmkFDE-caHQ-om(oEjm$7vXKSbgXRA`Jk+`j`Kh zKGadPd2ysh@p4g&?|@xn1zAT_WEl_y`CpzSa1#ccvF86%B?$E4zZ?b-8otx`ukFuY zPL5CnXd#MN{r4s9r8miT*8%wZ-~Mj1`p{c^TN~p@S0X?)IRpIqQMc`O0>fdU7 za53;2R9ECm{`bLVcjhUjSNeRa}#j_6jV2d_HT7m;1Ezg{nxcN4?ouZ2P+Zzv3Upxp90@o6CavrBq1^S;OIE@eylEl1$nzX7;Ux zvqj_`Jn+}8BJbJ*Z%_}~<#&EO8@l^{HS_;Q-uA#Hpi0#fkbbQI;3yltqN8uM0Z&panC&^7SQ7wUx`}dFEtu`&1MDoe`V4$hYdk zbTu<@d-R>RG;{mop<>k*iqQobB(EIGJIPcNbRWSIXPLFe{KWiKW37ZWxvPQ zx>B;++wLjrk%`K00g5V?{mi4xawl_?7AiinIwj=m*-$dE4JG4x)HR?T z1^LDF^!U?yGZ^M8kGDtLalUCD-StWjZ>kuX*|PK2u7AOc>q6h^1|dGo_!va5J3PAH zAHmG^n8h%Azp)S?sG^9-%T0HE`tfwOrF2H!^HR@Eq*V2q)Ta-Q8`&|=ixJmeh6*S_ zIz6e7(|#b7mc(`42vF_nS(8*~UuV1eCcql)9pjX`#>9Bceqz1Kr?$2CrZyf!iD`eJW z__S*rz-0=5km;bGm-(x$+_moiX$ix6CD3{A<7*&wbv8mz@0%{#o+smNpIX=h$mKl9TqQv5lm`-i*w3sVCwDLNk9Bg zQ+WeS%(b2G)t${;!Epm;9|B`opki47FX(l;Z$Sb_ar_i#em=ri9x)u<_IgjI1i)AL znPCEq29T?%$W3<ZG#e3aoH4_kV`0FZEQ}NOfj@aYPp;WlMhvn{3kl>Ucvb+2`%J-bXP* zf-;-p)BY+~j%ZngE%LEGWlpO42I&`wUd{5j+U?U;8VL+qJuqlHHkZ!W3{`_>F^5I+ zjk=n!NHCRFmz&`8&ya226FGvSH2lH(R5dW%sUBA)X*SH&q2MB^!nGZS{r_qzvv|h) z;Kq&i_cM?f!v5krnFHnyz|GdPOmDIPyG;8{MC30cxiOvh(G<(F+WJ-PYM-Fs;u$(N z2N3P$ZG+bnZN!>IB^EY9!`(80sox(nzg~6$1>ghM7k}>zR)iL89;jM&^Gqm3a;YY4sZK-e+UK)G zU|<1(HC+$Hhlt+jX$9#ZpgGG_r3_=WOdJdHgGckH0BiOpQXmrh%y)QZOTR+#4Kq6y zS`wn=ktsB{?`*7Nic(-NOE#~oG*^EWn9#W%U&*MDZn_#UGyYk#Vxgd4MCvxv-+z7H zZPHu*KR(=JkMnyAB&j~_S9N-`T2lsSqV@q>sOvyrBqD+l{n_B2T7WqU1q97amEGHt zR5*Q+I}^HWG_0X>ufVdV?pK=KD5uV^CyWhs$Afi7ym2wXJy5jIT!v$aTx!$%j&0A5ET z0B*;b8_~F_qf>^$?tT&=H|?L?1Q>@5YGiLU5XOw-zC(}yl>#vvk?Dse%K3;s@n zfH)32{DiFNSE|LJ^5vJ9nYiZl`r>d)X}3~<7bo(^W{D&4V_YK%xmPw`$v-sU|vK0N2L+3@zCcM-C+Gko)}vh^ka*I(q;=}jO1 z*T-se+xq{%!}CvF9IM8yd0v1V&0xB`V?}=T@|M`Eq)b?ktE&nDxZ!C4h7O5KgD$dS zywfrkZcrkpBXQKo8wYipsO&grzp4(+Q)$i(3-U5I!Vw<`Yd@m=YDJwxk8naxVX6rzc zp*R}DTa1!2{U!qHDS{>XIYv#gSry1dO?*p;2He5|$6^r@5{2C$zgUZTD9AAdn4ZfKl1PxQyXOww4110o;u)z_n 
zq-sMV&|})CZ7jG?euuVDghwqiG^K>-%T!Kwa>w=_@%uKN$jn3y_h|~r+Pw7{V)-eB z$WEJOBrIfCJv`evaCoXV`94}rZxE+ub}o2Ga2vl;af-R5z(+BwT8OXfBodR(;H3=W zLD0JQgkT=wDQ^QgUmb6YWpy@$_2qCsGrnjtb8m3ts$;r#13c!LOm(x>-DR~jL15Q? z%^*@W<$Jd-hbiZeR!feA%MRb_EqEKVbS(pewGp z7DupuGi-1GrpLSs5?WYL+G`CcTPrMlNA+$`lAue9wCm}~@BG%`AjA;ysy-Iq+L;q+ ztAA&OcDmDiqrc5>u-sx$(V)U-S-Vb~@ogI}1X`-{eKf*}bvJ6=fp&gixy4ZL#T95uK+o?t4osFu&<*5B#NRg7%DoE$C}mc zf z9({ciVTR%UhC~d}p#>|%M$Of33Fx@9#SlABY@c(6T0Lh6$P+)#wGZH#?nt^n{c>fE zmRJ$Sr;+@!8csr{1~sCH$Ldb3rzWB!yH+0XCreiL5ltx>5Un!qAGb;g+;J2`aBw?$ z3>E3Fo@GHv^`g10XdN2nsFu?dc&^;{#aZP8p%(3uAS`xh@?1L1uXl=Nz)dAiECZFB_4`CG*Fc!i5!TkOC z#0mnGD8?8sa7ldhxNzct-HxPq#vPE;$~*meRLb4+wH45fp13aZOYcm5p$xCDLg-_N zBi3Lxgc!Q#Gk~Z|l2Hf%SS0&7JV*TG4>M@ZmdQ*~c>=0fQTb3fp*VGrHkDk>in-oq z99M#U;6AV#RvtEUi4tH4W$o4U?N7)nJG?F2l98L-kh2pjm3P2rk|XBuzxxYR^AQl=$e-|zNBLcLJn?{Y46sMlWtzOs zKt6-FJF59{1(!(!Xa2r=JYvK~MO$^fEc)Z4M1GbW{0A2&{4D;IT~2L-vQ6bH(oEs| zFSmUtK-`uz*nsgqTPIwl5WErOCuyn*S}jea#* zf4uzN$IM!K*5p|j!ykqbELM+&LA!(J#vN|g>T{7P%%@){gC8!2r+c~fAD*pk-FojC zV1s2#HT?Mx)i(NtgF%;xPKssv)IOT6>}vpfN#F zSYL{!@JSk^_vzK$2^S|>4$GOtw^*jJ@CzmRAD>6v*G;Z7?0>h;SXfYMwoyJ)n z1N*s_^9S)POqemRGe~o0X5rlLrD=I-gAyw!KU);&tI-_jdjKMz)wY-WM3PnEm9k%X zy{yAvmdSmz6#-57UAq4AEICaBY2cN@C?E3SVIup26Sx1QsyZK|n8cMv3{QPC;YCnx zmG{mYEcJAmxYz3wN1MtEv47eO(Y&II!nY7-f?%+|#r<@*G%X`(B0olSM~Jk<&fLd_K$GVJo~=irp&{d$xZE-^2b2vqu=rf4Q=l>-nW8UAb1zyO2LJIjYS%^@ zK`+>r_vu+dj(+evhrnUpy^hsSH}d#liKE&Z8U8U2N`C%^TldMD_OWi|k810B8iZAM z+>o@qA*5}5dS-Xp8fqZ$A<@cX;bnUBK#p%I@&mnqnpl=>L^|T4wjudLZo4o)iQLV6 zS24+QL}M=siIj~hO{Is;GSL%sSeOnN8puyuF~tf@mg`E>SHvmi^!LpYm5a$`%AppL z&;bz9PoZJq#m$bf^J@y0NXlc^HG>lE0QX0`OjdWzovTDb8>q**g2LY>B!%W&3aS)0 z87BDVnaAG>TiXYkGGmENxy$*)j@DGub$N?k0(X&4>6+H9rvo?V+|9Y%+ld_I_1F>D zunzq0#dQLVUxB!?`-$jyZsl; zWe!;w=_Qg=4QlDQWz%V$h2f(zc(>()2ZNuCWGxe}iYFD@L-fNKe$BMNRbAiwKg-Kv zCp6eh+=LYb{>T4*M&sL%i0%(xQ@L!w8)47H&;zO>bbTdm0G;*&5TkM zxT6zMXY}EEBP4M8!QnZx3%{#A8c@Z>5e=G>hNr}E%{2jJgChpA&Z}lsY=zzZp&0%C zvKrddh&t7r684aJ$a$e8VqO|HbVC8762Gg0Qw?qisVdXpe$BD`O1VRLj~^-<01s*r 
zvhd8LD&xWYIVkjqYR!PWN+=1_dCm_%-qC4ZIbqE5HwuM~jD(%|2rZEo&<%Tgv|<6+0OlynFSrIFEtr@=}@G zc0GtddX|sv(A+-wFbGw7qj@E%zok@4>(^QM`fX4sOKs>k+TaJf;N`0AWYu_i->CUo zqTFfT%(SDmX;Qg2FssVG$IKnaeep>6diWMIwDN0KE%P*J>2oFnICT+mYQ75W5?n5~(v`w^kd`)NXd6w1h4Y9MkVLxMN{N%31vos|*ZKAX1)8#j=lY=7kE(VQfx?dYW!hP_p(<(vSy(af@0d<4aamj&rrXI5b( zYCfxOaLqNRN)njeJzDEIrD?o>n_j$NBJ3#e+&hNjSHAs;X=>yl~{)V&*~Qy<*me zdXM1x?9QYd=-THQ7$Yg(ctTL<3~R;VnB^+a6iI*dpmDEWYiaF(XI6zNksU`Ui~j6# zlGFW0w`}n&A`Oy(dPvxE;MLCsm6`jeZX+TjTaJ622#H+07&!#!uCa_Cmw+q6xmO3;+CUm%-z!<B_oF1(r%P{bSYgTG^9t3B^`px`*?>)uC##3)OQ z1iy_pRZNB>eK3>{Tc}}U~I7r1; z-av!@O(rgiEF4D%4bKf@XV zyG`y}Oo3ea<&YQnwM)nlp%ZTci{FoE(u=H@i(*h=pVe4ZQCX3n&d}t zN1~|)IN`2P*IQx;o(Dx^mRQNPTV6v|qnKxZARa70YWbXOiJ|AhvJIaQoey#v{S9>; zIBp(WwQ7d0aEsPsmXtkjXe9iezfH>wC>ca9>c9Nf7<8c5Dtdd7+}6B=jB#IACs&!6!ug`S z>H9mABpYJ4^*wOsTSif}dED;~9lDcml6y&QC!=P@zT_b%e2C5#$mztAWa~tf@xDta zd{#X3#ZU?(JLi!!IO@g|L~?O^okhYrkD`ocrv^xorCb6gwlY5}(aO(&*Izp$AQ;ri zjU07{w0JKc-JC}Dhb6kVVH7W!?p371&@xf>FCIALB!r`i{Z`OB{EObsfutweD`r-B zWWH+rxYC3Qi*hpHyn~HJ;Kr;JY$-~sTw=e!Jo0h_yT^~${8AU_`-91QS_%g+;(~O2 z1lv2R{RW)(n9)|_OBYKlf<-5|PMB160(6gcue!o6~@%@%B4t=>vzlP35l;Yn<4OS~Gi@lDf)XJVn)ay`H9dMd2&fC|2cGfuvo4H5~z$U*+vsfof zF^Z*no{`7^XoFdy5Sh{sOK(zPjj)abveJEaq^%WDQ#T`zL={fdV`kRm7H{hix4eWG zG1#OBp~AIbrtKu(B;mot0?1){MM0+;FM$d?{3h%z+)-kr6IM@Hm8iwG3-46PtBqvz z(r63szREa7s8n!p)n~#~PH|7k%UTV~ep%Tzwy|DUR$^i=a1j@lnMI!*6!OF~vf+Gu zj5^xM@uZHnKh*#f6)5{FPKqx|(Z$}Wd4~oR4s^*ly{TV8cZYXt!V1F3JC0r{Zzh~e%sWl*5@23{G5R~_JR%I|9L=dkkLAuua0^z}_&dD}Upgz$Cq zN^4Aa!}1sLr_B-+s!>LG&WOu^1wqDMfy5z{&FTI5<)ot^O#zVHg(0fcpIm<#hO!Zq z`32yq0w7J5g-Be-#@x~l)cDyGFSHW?0+1|CFQB8QrLyK-{y@+pWW*)Is zndKRz5iONylS}pa*R&R+bJNEgrVx)9&aSjX{g3=Th%6mw2BP z7%`BU`rNGGAEG!&(P%Kz;lZRockQ=(7XW74^hvm))Q~_%&II&jNfJAFSvZ%RST%C8 zNy|j+UU!|XPQ}yws3l}Phvnj%rh-kqv>)O*WMNFmSvrb$YkcCQFD-j2e@MbA0kn9# zId**^o1|twA$`SUgfInR(2-q{HGw^ga1E=wrEFit7I~Xb5dQL@WDi(YrLDZ-kdjVy z@ZDRldkJ6}pQ%v!0h}B0L6HTs zpzIG-4lYet!mL=TJxZA>9q3o)$CClY 
zzl=Cg|B%mx&IwRT&cC__ScVBn0`?(W7n%|Ur~D@QeGc?fA$@3wM<_kbAI3gAwLEwZ z@#c4vv!u~ zOHzgBtsV>@>)g#*H>k#<7ez-me*O3kklYJ-?W-vD^1HyAY+^(H?NTA{vCnHwc){WeIb~o#IU)&<7 z*`cz|S9bFnNAl7oBL*U{DdP^PtrAJ>06JsrijTlIYgKciYO4M9;+FTVVMUcuSdLTo zG8Erxv#|(?b2t&lU2Ao@BmD}w{^1xic&2{8->O9%hGe>5=VJP(*B2q|n_tx7m48N8 ztm^0JLg428dMf_nS86wf2F6D9%C$5_6DPyqP2Sxb3DM}lCqfck`n z#E^PFRtF8^qw6xqv3e3btp1x**G#_ShqD^yLO^p1q(N#N?%b&XvnhCvR`T=Zp>{dk z6#}jC7Jikk&t%JpYn4M~(b%;4jhBh447L$j^W6RwevVo-Fr45+y2&4k^h%W}xFom( z*-LT2tEhtgp@DQ`1J$qefa5xBqc2`2Y#>?VTzCeL2)$T52e85uM zSp^%0=>9V&{W-fp^a+_|p_Ji%4U*gMy@Lw zeC1MPZp^LjcbO3X_@DWXH9y&JN6L;r$*RnRz3k&X=X3Y;Wjy6SE4kjh1a?<^g5A;_ zzJie~&bZ*@iKY~7Ya62P7D8QRn#?3!L~@6p9kt%0o9pDAswQYxyz8SF>oY=7`EIt8 z;)gX?w%|Qv>a0w5^MH{~Oc|$>Dv$J1-3F&zeB=lok7*YSgs@K8q*Z$D)RTa729So% zb4f>-xq1~*sR_xR0gm<_?Q}T^O~vW?Ww!cq4{mZOjNGh2)G{FYNGmmzHNh{?=->#1 zwHa;I_mTP}YHUtHI+@&FJ77Y_eQ8(eYGNKey zc8adSjIw@yx|;l?WD&g{$QjJ;0j;LSamUHVrtLPh{%2V-e1%_3P=sERtp+D%d#tM$ zU=bX)WNG_q;vL?XGYTfCQY5y&_8V)holrSCb3; z^W^A$T_fv->l!@t)sua?@xOUc6#A>x=CIiI7!O;8<+5?jX9mIm3ahEx>=;{qqC4qJ z$Mv6P(YNT_iDmw>OA~ncoNZy~%@==gj+$y&zwxI>PpW9%SvG-jEzu6FO+?NTS0NE2 z#My};_lx9(T7W8+1n$yl;$3$$An}O#?gr#54P<=Y9MoGTD<&WbugEhATYr-7x*;*d zyo<*#;82Vf3(=lB<7fd+XJ9^Z<}Pw!Y&8E!?Lb6n%D6m_r@*a33lTz>^&uK@Cs=}b zD_+I#+clz&v4EMOBdg@9-dFeBr`4-N4R)^o?p2VI1;YiRpQav45sjR$q;&FLC7$dv zpiX}RL3(>x_&w&-l~sDMB;`ltDA-n-?`M$i3{RaT?bzC;xR1i`xaHcmQo$7@*JgPV*h2~R}|%k#mUtn_5Rdabh0w4 zaDXwHcPcv|=)uyuIer=jM~!`ja$oAlOxge#inyG+YZhLQOrd`V^n^pk+WcaL+PXDQ zGSwU>>%P=RPleOqql;xx(EDB7C z=|cNt0SQP(4|M#~lh>1NVT1fhJ~VbNXP*7o;VS!eVc}C6ZJA0k@O zrSqQlMy2FH*XTM6-?&r&jRyg*AkROcCHXvu=~M05(6P~~3bPsqYkxwD7WCTviXxRu)gr=im29+D44Ys^L?^notg-k;!XId+w*h?V=r!s3d1Aj0Q-(a?e*As^5)h zc(33Z$3Xv!!8w2PrRdiKVT8t@Ppmrhil+YY9eTb``e$N!6s}=U67u!jadyKZM+erT zy0iHAB#qEG6=0d#Du_U1QB~I5ZyY@N4JBnu?KAd`=iIf&+&)vqp}l$Bq&dj#+%8Vn z$B0WBx<;~VNzZ=qkmF%6B=dQam3upF;L^7p6yH`__A!&5FxF%7hDG)}_$UDGvnIOg zA}oUHw-4Wu$n#JAqeedFdQ)P0vAtg~5!|{v>W>*uBzlmh(629bbwXh&^&!HWC#bV) 
z)~fJ%;?VZt`|LqjXMeyqZZccln&ftrCO9pm&s#g{!Ec8M`nCF~*1^o`Hoh0|E};fB zEMiKx^_Xl5@;Gm?w2XB(3o3EzJHUymyPB2q^kr{K2>>f@H6ARm;9EXQmxe2z2w0s0 zHtTB}pDeylfE7ya-nK3?;RaDQnj&yVy=@E?gSD6%t;h9JnV0a@^y44|C6{dY0?50l zwPoB&dxf+NZ^=SNm1Sr}=!&6oN3f?PTeIanK{oYPb!m9T;E|I%US{!r?cK^n@ zlgHW`w6@=+BtpjiT)V2EX}v;h_LtpB9I+>Cu+6GJw0z|?oT&YoFjDN7P~SfMbab%o z#$v<*dC@+0V%&K^NJDbbt1~gTg}_qx`c)e$vdgvJjjUyDs8V77i>rc$f#uFqN#PcE z6ZMqPEKvY=AEXAgc$F751lh=1Sdxo68Ul80sR}QD4f1;-+7R0bF2-_$+Y1KI7)qE8 zZTEPST^%YO84eLS2G#Ecu+Tk zq=F=p6~A4r#nr)^_S^~Ll+9TLa=I4$X9l&dmJ*3(-~$b0^sZ?B?8co`;m%WAGadPN zp1~)QO(qgk`D&8P@}WFtUlef!BXpD$RM_llJ4I4*KN}w}e;t8K%GbZB(dl+~UV@F7 z&t@@Sz2K+5uMsNukvl`572j?6&U>b^I#~!?&$RHO%}bq?r#V%TEJSws0)A5KH%dhD zj=Oy41!P<6kiVgUH*2l)5|3-&a}_*eT{=NwkU{blZS9S| z4a^MLq)XDWdFg*hjhKQV${W}8925fhO=Dd(E7%<3xHKS$JAJ@= zkvyRkZ`ae%;%XojrPnoJ_6wyZ@vOS_;jXwJJz%BY>+(x$J<#;(BNY6%{lf7oE*E_* z+4=Db7V@^x`Rh(NxcQmz8V;b1;6}L$gSqH>{dIGwI}#P-xM$k5lj+iM-fVp)pwpu) z)Gd`8Gusl_L_fOW)mn23N{03@>Qabz6r)#;;CwLk0lLd)c^q*%**ZZuw8EaXON`j| zhcp!L)5>x?*I@j6G-|@EW4XakO)sRDKc1URwlNfdqbRnd3U*)g%^7ATmB(0zQ}>CZ zU~YtdmncCsto;j;jF1?jwCblWC8XTFetphsu9)!_6=3Zrqj6^WF;`-en?!@K($%%b zZ1)6>fr&6oR~pq5vDzWQ>^Rwq=j=zN&W-Arr%z;;GdE7Xsr)=%F zin5gSt(vmp$Q}p$3*YXo5xut=8p|5K#eNXnyotMM1?U%sup9N-v;#CRw~-+Xtz_ZQ zy!N^xtZxYm*SI%F_{DP`QhywU5;@SjiN7yYst86Tm@g#_d>y6D{X%a4?wpaK-C@4E zwiTzo_+wluO3;WJDoHR(Gk-*d$jAjfFA+ zKF?^*$jBHg4mU${;3SMma=o9>AshoG%oS=1j%P-rD>Xb2~O!FdJ(+>mHoc|L)+9lPJvf^;{g5TJ<8kS)fu{B8}YzUpS zQBLrpn7{-*N^TdA7c&$jEUIP-=KW|xllm=d3IM&Wsy)oFt*5NqM?%fQ-_jCVJQDrT zY;`ssT17~%S_D!s@LAr#Al5WC@J`1B?pA*n%?nvE1UPq>vi{BtK-(xQ*s4KcJig4!s9otJ~JkM$?8`99hsP3x4Uy z>VR}9y;hm_;(87Rm%HM^o4f!T3~TfAbd(P+O9Zbd;}@ox&^FYaS{;{nbX8=iz#q4= z!1O~NNycr(4V`jWK~%;+c$Zu4mBn9db!X&`#P0{oG!s4XF3}0j5!Eb2_a*5-HVJEr zsE>sg?(3J{vU1}uX5QcQHZ^i8EyH`e>xLsF2?x!o5D%Hp}UxjfN;w1Ye~GRmf-0Wy|I`%)dRj6iGTnZ6-P#$qU~ThJ;p0o zbp#gxETc~%JAT=*3Y2!hWYaO#r0}}kIzf}1`pH8$yg_SBCcJ^AzII2 z037uxOHKJah8X!{3o>%po8nPL8v4973F|IF?KhIs>`?tJcF3E?{;c9PXd%JkH6+$e 
zYjtg`MS3%$dU@T~kU@f9xCCcYRQ!isLwCJ+3y=54A(jZC37OC;=iQb1r21~vH+c0` zsr#ndqM{SmYnEm#5T9WiR|54C;-a=e=Bsrqf&8gN6aqs#!O`-2mYA+LVlEgN_lD}k zowgn7qtAP6Ur#)mLmHzrtQ&&Hn~;7}rf%={VZ!RE`P{7LPAqF=Z_=p?taG>pGMXG_ z{m-mNe z8NXgGi~KxKbj!$GElnOOcSD_k=V$XNFD(7K8JAycJvQ$hTrLf()#n!k@8tE3#EVyr znH&|jgT{Po*ZIq-TxZOGS9*dl|+5Inp$+)UL%MJKDMg%-Ic2EQrcHE&m|Ms~`* zzH62;W$k!(#FqIk7FQ`TA5$+EGT#DgsddG5@m#!ZZ8m}r^Uib%Bd%);~1WdyDR zp3{HIZ%QhPT*eXv@wGmEe3y%2j;tn5Z{gRxz%m+<)CcRGXt$)PjjW-1JNt? z;+S|qtPc33KLAQ><(`5QqSyv~F*`Xm9ps4H6W|-oqwl9KaN%k2ALL^*TKN%cTPP8X zOtTN~H(B_9(a--N-i(R}7+i6{t1T+R>&|?h!3`t}Zm;>v$2PVHJg+H)#`H~mVbTlt z^Ip>`o8_mKLVtc&WDby=ytff4J(#iRdnDXFisB;4e3uY_JLT0;>FB@qwqs*$g)!U= zc1P)Q!Wzw5?@hb2H{xIq$oBGeiBQs{0;*^J`dr_9wfX=3aJCx)v8li6*m>TOS5W$n>9@-W zl133b<*G*TzOSK=yZwE5A$4M9v=;(#jqb7ube-B|XRCpYRDhs|Ow~%B;;x;-+;tEU z-3q*=?NkXaZ3+cR6NGQ)bt5DyIQ@=lCx5b`yNsT#mP4z%jJR2947Vc^+C#h7hFHub zdV2WT_sHr08;MzW`Hn|v={n^a63%gheH|R*_FGs~fqb(pjTGtn5|krE@Id~&KDE=O zzOI+>HoY7^FdTrWnLdxKp;N@PN_(_JCh8=T54a(rIQ!b@2&=_>*j%aBR~Uh<;E5*T zOmY0`ZuP2EedcJK+R!*fk-V)9>hCx=^)zn2nVCsp{XemaGe&S9A^q%X5My4Zjq=`= z@MWIW8%pI>5*9|SH7DxPz)Rz4ZY;q}(=>AKFV3yFmfm2Jg2CiF|-IT~BtS-6SpI9Yedrlyuu1Vl2k0LZ>b?L)ie$o9*h^eB9l+ z&x&LK>x05L4KhQIc+Svj^jCbaqlX%wqs!~)B``m$)ptz*yA2LTIo zp4`T3w`VCm3i*q~E17}ik`oY>JTF=Ac*JZXj{5y?7BAX&;bCRieZF8&SSjJmc}OTb zKh?E}qX8yx*tNEGD)de_AXDriG1P=Yv}*S04hvo$lFn*pR?ss1Y_CyUjij1xY|H1k zgnMVZ?GTqyAd+H)lxIgRD$qionTUhJ7Va|#sb|o`BUZ4B|^>%kfWLbwU{Q!t{FHA~! 
z<~qA-q&3oZ(A1)%93@xpwmdX6sJT0Kf*Q}Lj;~jlZNNf<+iARQ4Fr;_w8rjT+E=yqboMvgQqzlgl3B$j8UwX{$^KbXYg0x5 zyNwbC{(!jBAF<(!{6EdTS6owFyFF@sQ4tiCCQYg+O{8}f0g4YMk&^v^vbg3%6 z1*C&?Na&HS^j;DmLg)}W2_z)^gYUQZ*=L`N^E-EEU2qYyvgVrQnNJyGOpw|<+J}C# zXh)k2O5Lj$IO&jwmJZg4%V#|irQIwk@W)mSL9A7ZM!WwaxCtbU%mMPW+cIei*sp9S zRV~WauNz4TTnHvBZw%{wz1;HV9J#tTg}QSjGA=$%&?b~k)%Ib$jHP^pm<-((X-(^i z#ODF^9bjn^zC9R7kfqOBHfkUrb|7IvXbo&-X>t<{oVdH958PXZKu2K8J8=Kd%wsk* z=G1z@x1?WX2Y`VFE9m|xGuizMyQRZpz;}?2j&#hHRyT48;v>rednz?vt5k^|CNTcHtx9*UHI)n+$T)LOKbTdZB!Um_ia5RPvdu#To{nX~ zEhq>kezOzgOp4zeRo#Df&+eaVVSgkO!IO7{3S|7i&~E<~jIGQRp=OB;vjFcu37SNR zlTW37?mD|LidDJNGgC+W+xSYl9V-gnehj8zFz7V(EJENid27L+uRU(#$|CPdk~~^R z+=NI)H1e!Eb|?;DEtB7K$ke=eAi9AAag+^XYA1#&H`yaMg`1u z$*@BQoz>C`J<^{Mfw9=Aur_=HsbJR2ab^Ll60NpmFr2OKAT$a>9W>eET_tdOMB}|A z4ya`52904E_bdoYZ^yPy!;`Q2$mpj#;bzz>q^4sDYpULDeSKRweDW?b%|sL?lVYGU z;9YrTAzSlk`PoQ06hoJcU^(-j$T}hV%D6#dFqajf)fOse&AwKp^VZO%$=t0FkVKV0 z_$drnyVD?!SwEJWn40E`BUdGZ0v#^_bUkg2JKH~(i}vJ@3qFdH<{Q`haEMGnZ*@u}JYGwIU-n#YL)fkB}yyU9A31Qyavy$4t8zB{3K#vV^|i8rg?neA3oz_pP9b=d!d=lhQf*do$DV4$y!yOiOe6(cI3PVvNq}QR6-Xfl((-MW30oE zueKuyTLp^Z(VDy7)tSyVjy=w@lhkqo(ib!S#S`Yh{x3Ws!dv<`GmGgGOB0M`zmKq$RmfUJA#|qe zNkf;(%lf|nh0Ee{I-P|e4`{*nVSVvu*Dv)hW66At_lhLK1Ew=O;oYzMp{qqz z*a;xCPAJnSRYTz~8L;t-vFQAWLaqG*)l=i9ZGpdk(s1VBwSa6LXPI zXfhh6uTX%8&=P+V>=vS21z&XnIn%_!ybc7ajhB3bGic4 z_J@OAZ1(pdD29h{Ii!OfVah3A+dfC zpooo!==}Ap>hSjND8t+$6Ke)xyt7P&F6F(u)U!UgOghRg~1 zi{o|9PSH3gLtJ!`ALbD}0%x?^454HZ^X|HpXX_vYYnu3|xcn}?ck<5WJJKu9Du-?wxdO6A}|wLtR~!*7GkEFsRPCItcaM>d)nHr2f48 zAdkQiNHtFGpl^uIg-74*^mS=nl3BU{5JlG7cv{wRbDssSkK8vz(kH_g!)laTqI!@; z8bS9nRL1Xsl1?Ex*HJ11$R-8G=!$Xvj(Aw$6px&KTH(L=QJ25m6xchZ0Z<>?mWxyp z`%zQve_@5|Rodyum=OAzV_{q53~aDU2HfByoU-ZTQE3BCE*Vea-&t+7rqE=zYBF^v z=PCP!wtQAyObxK?NPr!jDyb#4Cx;(B>Q8d8$Sk=DJ45U|Ag|{R?J{d zc`Z^u5kF8a83rX8K2rtl1mRQ zyF<~WM8#Syx!%^Qy#?`{(;@h1%f0UhxdTFeoo0fbl=4$3`3cSW^;b*^2e-smV;_lQ zkd3O#|MFra!|U=hYnIaAiNGITkR~g|4eM=C9^&!D08g5Oi%kC7)00jWWvyT~q%u*@ 
zBq5JNz-9=kxJnIUS8TOLqv>z*Rlq!yTZE+_%?w~$;kq3^wK&3^ZDr=onvV^dPPN20 z_eNuQPl;5wZU{c+B$L4kPvbPZVgG?%w%nT5K>2xywz}fgFjKhzywP^z0tZ8jFu&@dS#eP=Tv@UVPe|DgQJ1)sR#J^X)Q#ud@(|3WVhs^ z+hUA&z}O4)oeDS8YYq6x8o(?SnYk%0WI$|Ddx*fMo6&kYu3-D51R=PE8@^dJCxtHd z#>CUeKbB*}v8;6AYFC=0Fi=Gup&g;Re1A1jf<>L$$2|zphxD`}*8y0d2APwBRpGXB z_GfFr{nb`7R2!4S)J?k1B{U5f^w+XreJpg3j7}lzvln27_W89?SCR2&Cg@I zQA2_|TusSd!^Gz-BJAN{RoAk_DS?&XEJ18aum;x_Vm!c)mcCA{PSZBM z?2|WN1xR%-uV!rm9nt|uzOMb$q~~NXwVDJ|(41Mop9@IqUlgBqS`UaXjtxzna9P+O z0*puo-EG%ePX@xJ-%1yhP3Q|nmZeCS&_|JlH?I}?{_$!)!g5*;*2ExvYX{A zhl-GJWwbd>(9IG*6tAxLdV2C7LQ~&8f}U$D0dIwNk|1{EtTcF45+!`p+`6;cCiFNAksh0B zdNb1@^JPH`1o86aXmc@7zmQXJ2oolDKQe(lv$ME(02iXBwj<^#wT!;4a85P$m!!72 zwR=Yi3INi4B0!)I4$QYsw%WYcqylC>hCb5~d!J_5Cpjo&X-U5mlw-E16}q;F-yb5& z0a80~i+d23S(9OJSwJe@ELLYn6OKSL%NW>W`k6KyfAhNeqOHsht`6Y?FNP4bF)i$8 z0C;xgjz|h!U}uv`$+oR_(xa(;-5D~oRt+J~TNS`bzc8`Vu;E*?qg4w0X3{tFWA7#M zd*!VN(|ZIZFInHBfwPYXW=Oks7bmj@zZ8AUOn${Z+@t(wkL+h#N?D-+72pSU`tle; zVrAJ=aQSHfv%)*&J8`-8kf_5UHALf}IGAzLLfIY?x|X?w_Nj*p?4UVXaMOBhh|)8K zF^7$G%`OH0$^L3eiI~{3(Ba{u{|3Qcs|Y06&aevZJYAXt7kIkheTsYC3%29#v%W9O$tvjY2jGsjYrII{e#?*R1N zE6YpID3(1}tGWi_Uw3F4Rd9AVhP5mQCZkh^hlNYqJeoc+6AV;mRp@YrE~@zOfd62M zRT>&n{pF|XmQ4PM)GAFy-MzpngmMrCj7BOr0{xzx< zKPEJrS3zfRVwpP`se>Rid_uqR{N8ftPgpLt+G+v>+QJc6^;Q+umugXgOQy+&V$~qE zK+}S*L(}@qTBf_SShhy>NmCXsWlZ@ zG3Zv-(Bpgzoiwh4LP|{@grI87nYckv4=_Ln4j3TKmO^J$a(8S;ZN+SVvg_eQ$nr!! 
zexpzo($QC_#+{}q1o+)VG$mq#Fy?^dnsOI+l(p~E+sIPb`V8IuTnemqAS#VLu2l+B zn{Ktv6Wu1E#<=WJ701xty@ve);8*DKhpRm&@;I@nzr*~g)AiA4MSyTU;b+R`V@Dq{ z6Zzt-MC-&YX7i39SDzDzFY!1T5@24J&fVCop%Y7YNNh)-<#=c|M+cU_t|32TYSHHCK1Cu zmESGQ?nSD$TbdoTp6f~CaxNW}d@7(Q2iY`o7Yx=_-vnr!X8lzf|5(2G5H}L}d{jxN z=ya@fQR{s`-$N!wEp?HM+Pne9llDQa1cyvXG9Xk&GFC7zt7%eD#?u(NmtpALvp@Cv zz{pSA_^4gPK8@J8Odh)lIH<1$->9Am6Pv7YHG4x4P>ns|b&2}J1syN9T6mHF9XP~u z5ao0B+vU)IgQ699m!8z<`uv>}=@~scpzchT{j}Ij1O{k9?+=SM9+OUIuu-(C^zI;3 zT@d{>nlt8b`W>PI(kl?)7=%kdYS6;3m-aEbzM3Gak3L;C&8o$G0k`B~g8v=NMDd|7 zY3Ko{LK#%UUBH%{4E&4Jtvg~Ut$J59f`bs#-G6aOe-b1lnu3O#J`?KfSW!?^sJBw_ zoHisbZ^s^77gyG`cdQZrG_YJYvnkALH(*F*qdt zp^aRNATL?V`cP8a;o*6vK-r?@jXo1U?ez8bb6}_6l=Zt9C)6E4vY{s3RI8m@?zcWt^wwy$gyM*CGe8ugWYI=vU@pij){#ukJiR;u zl`-$Ilb5K%WfGD{HQv3D-wvS+d9~+ZQc8wrwpNG<8KfOG;MTb&8a1y!)&6VFza~A} z+0O&Uoo;di*%GE(CP_6Vk0CiF=z-S%4F6d77&!}<^%s>3QW!ELgb@9D#j$NWsHB~3 ztXpH$nM*ffA@qLkh>r~!ml=x4J%W&I97L)oaec5viXKLJ{Z;`!#DIXRZ0sbSA9vP# z2$-6886-YONcZz%YTm?qo(hf(oKTNa^^c4GM4csQ>j;ORgSSOn?b1<4qs!t89oHph z#;EQJou&YH*;FVb_zy7<>j2%}*7F%)KbKDY?-`IkIAr?$zh;284(&2{`{5DO-q6Ny z3vkw|A|cIvW_X^2K6zI&x7NS;xR$K6aQGtK|Jgd5gd*JBYCDkKd$x?HM>r?PDrr2x zd9`}mO6@E5%QE1|+I}!6YsgDLD*5^2g5@?hk=BR*R*XuIBkesc<2^@~dKH3ixipW^ zKRANZ=>1Mpn6Ay;skWsQD#`ee1h!lSHiVi95f57o_zGMvnQV4G!Vo(t%~Vrh6L2V{ zJiTeeyJ*gnJm^NKZLO2S>=qTYZ2?}T0-`-ik83`6W1%jQ6@8tQ;lqyuR`904eoWu% z3Pr}%+@CO|Q!eqwKl~^VsHkPV5`YYjxg!3^(<*^D9)wB`GFM;_mPW|qJC()+YcmOS z$lafcm@|1%0ccEG195}lJf@wh;snChzGJ*@T?*^jer@(<`4YQ-#cFIOaO?;Q1v;-g zqJ;KpGI4}7{G1DRb>2l+hcSpg|0<*X9OwVq5^lXFqodCZ^)l&erLct)P}TBfCxkCo zqg|_Q1_ql8K~B(TzQ0>Y>@5fFN*RBtw08)Ypwoib>6fpiKies3z#`Ek5&TvXY5gsA zq@{$rm*WcJ@Y}WksC%pxum?;edK|f*6c}7b?RBE<5UU)-gLA8LGJ95omFD-V;0C3u ze2qnwPq{e2ib2N_JJaFR4P}QS$I^*Cuo#dxqzo9vhh9*x4Pu=|LD}vDl3R%f6g@M^ z>^r2I-k`c-RQ}_>)JBERV8H&4%h)oDKEg-;HSqo|g~`DnGsj`@+i#wMQ3GV58RJ~9W5pTkT}@5=aVx@Jw0Az`RPaOg_sV;4;$ z#Vo5AiakHR;|DP&lOQM6Mfp*eCk;73-_bd&6a4p&%EBP`FjHK~e9@|hsoJ1*{AYW= za~Syl-g&+iiT^rbdq^Iza|NOQ77&7`#3>i`8U*DZ8W(Z@}RrDEA)4frnqyuis> 
zi~U*e3hmc_-Oe@)Un}b!n6Ww8+}Nz;5FHC?->1YqMbTYz;?Bl}%b-f+y{g;jT0D^~NR=d&Mm`v%B zudbJadnj)4_~{AW-uf%Ifs7x5IKpo@Omc&v9zlRo5H20xAY^)F<3Lz8qjj9dRd;SE zk6)*Jxwu-+y*#-V^u`irL>MaBXwDv}`fmePLtf>7t`QE4!6m1KMswd!(wuis?%QyQElC4>6zF`fApK`Op`#uph78OSJHz>mh#Qnl-mw0y{b-&u<7khmoR={u~z*#@uij9vPy8I!?dH$uP z1s~{&DR`t{Oq@&d|6{in$H3BIAFHExGm>ORFH9`iZS$X_oX~NT>I&%V%p$ z1%G!yKQhZQFJLxN90RrYp8E}`zS~NmLF#^-$w{86aFLuUH#*hYG?kriE%CZtIx~Xw z_-0y*DM;LWerJ`lr?LOX2!`oMza;;QuLb9SYYCB|fuhe%gcJDt|Kb9twSxE#=WVBU z4hmkt6ED70ou1zt7RD|&^r=Cj{^ZF@7Zb@kJ{xvEy6?_YLn=tgAR1GrVE13(01Tww z#u>ha-Y}J)<7ox|Aaghw0#07=28pz_Qik+CDLMW&QsHzw(7R*EC8qoP095AfHhVZk zv~2PrGi3}!%c)wuvorz@m+jWibM2|`3%81f^u|GF_Tre>a5ap3;ZWJ1iEjhXOy_;VdveKZX+OFT-c;qPukqD{>`5Lcs$pQX5?P!7DHt{* z=e-s&&|eCK((UR^_<=m~^gDd|V42oHwj8YU+~ZJ+p?cX2JJS(YbC;Ts(y|GDh)Adu z*k#^)jY7UB&iQWA zQ^(0O5fCE)5Ts?NKJ~ed0BH3eV!N+`&~Fyz0GSOV*wnNO>sig5+o9H0$#SWO`rlwM z^W3w%G13MmHF&Sb`BVxeYCfiXD4g4B$Eh4R?TzqM3|PeiY6aj@Bh;U^r0-GW#EnD(Ds8B zB@`<8C4>V*>HK0YD`gv)S3YC>T>C7C>J3-oDQ;^iV~I;+G{i>v1<9qX{A%I)Z~4`& zmTw2Pwk~-P{TT0(Md{bn>WY@Sw~Zns?taX%d+L9kF7t8nnR48wt@s1Ze3|x`q5RM@ zV;!0%Kl)Lv%^x_vmB+KvH@@LX7d7xb9L~F?b_XP*yi~}Uv>>ta8sEc~2ki3!j!WQO=Hrg$1xE@t+;*<219ey2 zf?d-65>K>UT>O{c2IZ`eczPkQOY5raN$$}`u~(bA4pM`5PE8tnFHh-%ub~9u+M?y- z(q0%%$ft?gTb`~@9@*+G*t`pZSFq^<59r&|gC=_5`P?3qN_ zQX}dI3O>95WCYI{TRxd*@Q!>UN0&Q#>(3j%M}ym%C5WN-ON;T_P)cGdWlyd`X!GG- z%BYTGtLB^z)Ix8bdm<*8Z&j&;qDQ+DFW2XFgD6We(`D@q-TAip@!CBetJBR}iet^U zqz6$^Y-I>bvCaV(2a3~%`QXu*otH1c`X4AY3D(d3HRyaD;2ASyAdW$9Ws<7K^D?|u==*o0?{9}9BVNCL?x5`wqRij@8W}q0 z8Sv#u_2FTFlEV;(FUwQPJqp1zNMCjhi1@nma7%<&EFt?zz+;<8((?piyW9R1XBP_- zrd0Kj>CO_+!Ho|Q@r}cx39R&D^w%4Zl|`wZ{UYzo+sg0qJ^}x+Yv}$M^t^~(nCbe7 z_3=@sWbKALi`zQI9x>7Q2|3SMuPrYzXi;YZB0^QJ6vmjOt4wm!)>o1$7~Fr&fA_u9 zaW&)BW>iiasr$i`_k-SBW06={Iz>R>HLs2W%iaaFq&5)sePxTGb4`~5xZf+qJj5J+ zeFbxuCOtLILqbz;u5~l|6u-0%0(Ra&|pLOCMFdt{0wjy`?^!a`9Q~)?1)#O+1Oh2fs?kjVe|5nH|y@|Omsr!Mr zXNc$goGJ5A-PDz_fUWzjHjlO!TQUPcTk_13mG@KCUa&Ox(90yuL!6?zJF|}Rg0%g$ 
z<}9xGSM=AJI0g_B=iy7QG0huwh=>9DiJklmku|6lu-~KLK+Z<$T^2ljb^pB#JBLg6 zpOlD*X5$$S&mfWXgCBYeP9c0WGu$h?vywN1z3(k_FMpHt;GtKknc@@>pfWTp{`CqI zx~(C)xWf=@sWX+#%wM$3P5RdJ6Z*bm2IdwK5wUH0Gd42fSCpwz+kR_=e95;n zEjscivsP&$&DaYvV=F1N4{Fq!Z3`YhsNg72y=Gt8-}cF9enG+KrjweUup|!$$4=+p zmVL{)=12L@gHcR{I{wB#JujUbzI?V9a(KhXK}DfQdC^dJadIa08hLNC$ZoF)@rZ4JkE@5vEm5(- zlSe_dzP?LzOMVk|XH;?LzpoNiPD4X8hdh4*&74c;CtXGz{-|s_fQsuPeM)1^ZQ6as zh+ou1)unQV=<768>QNW%jsL7We_50Fxjd?w+;)^7`3o7@l|LB`3`uspn!BDb@Jn1U zYt1xh%*x#OYoro|4+pY&EsaeSsIrjRwHZxGyhDD{=HCaBAANp(p*?@i|GM6Xwh90J z`0pP>3s?33`p?Uk@6K-<)bMiD{L3{qO z)9GOa!`!;CY{S--$IF1ZsGGEf@o+2ZuZ1Dmj&l;ii)5oOQC{+F{mXj9DF>VNeo~YE z$I^#WR)kR!!`KmxtYpx`pd>IlGcTybQvnN%?bs@MAPRayP1(2o3r^4yc?J&TGV=}K z)0CpDI0~-}`2D04=YP*gL1FCk$v{)~M5$2r+?bKZ-u=e*K*e*| zn|%!L&6#5@lSK-~^B!4JJb$|o?bBM&W9`*i<%oAzm|C_uw(wMpu?y*IDtYd(7n?wZ zU>s&*7kXgL+cQzaRAuw>gYF8x2s}j5v$|W+;pSWMrJD$S&-=pcwZhHg6~L}KW%Jd! zuAM3u&1~V?z(3@JmHQ}NopBdD=|OtLNVtV-Afo~gk@fz7OoRvb62Iop-3udd=(ATV z)_%F3pur!C2!haqUs{xBuod=(HTn~u*3LdaaP;9YOKSA4lFFNQdUg)-wNFD{T!Ftc zSq$b$7b}|WCDy3f<%owMaYWw*V)2#e*_;sdEHr4fl=;l$&Khl7)re_bzI%o-?*4uC zGZ{hu!$#fjvZZnx0oJZt*;EMvogsL*o2yCVg{tmb6j{%SS5@2;=eOduZA$+b6rkcN zD5esF3PZu~rDIPVcGtQpp7UrknT^p1(6iY1{+(a)AKQe#2<_q1v4p7SFxCSzg6Ty3 z?}u|cZF?-U()S}{>mFJxBm$O!S&J=gRUP8H=@dQi?DvkmS$^NCFLfQ>jX_nA8l@;> z^NF-BH;DXR#oM~-cnt3$?%vNE>a6}};5Kyi4xISneDrW~9O(ze&42xdTa z%U>;g>j~18fCEldt26ba^(=CPhG(>oczL9AwydsbQU|D&M2h`D;Kh~7!}h>%SQ#x z*+|Dmi~-XD^L+>D?18DP!}_G?8ZaP&R_DcTd!CcY1mx#O>&)F1HSL*89k&Th-vu*=H_UKJdWeVpYn#Lv$# zao-}fYnUZ7dapv^rUp_{{cOEpT{tX?cUt;J2TAtETU8HXpJTpjtW7uUyY@xaZauJi zCvg+Vd8ycT_xR`UwTV*tc74_ky)n+&dRDBYB;Zc4Ges*aA#4u;syYTn+aa%Em;ytT zCPF;~nPJA(lNhCyX2EOQBKO^b0ZQq(`~wk;)OujbC$vo1oqOAvp?;u^SLE2WsP)1L zqt25)@1|4(69J}F2=?WxRXg?Ao84-lhZf_LkP>y|f>|aYS}Wf@ESfp=*W|)li#TJ= z3sb6}&bgvBoc9S{N@r~2QM8|lJb#E&FXeWF3;I+d6nts?iK?fS^30dB=*PJ17EnSo zV>Lr5y+Rw4FvZ)9x|FwW5pGZ%S>}4V25K~RGbuPk4u_4RD7;@rg=9+F1ZIRf_G~w^ z=)a1q-}E;!WvaXptLDa0CKUD$K`oBzMN3596P(!J(&)Ir9=^99{g^_H8X+6fzW_8H5Sh)Tt1DuGsBbKs8J& 
ze%75T8Qhtp&KcQRn3G6X1I;|;m)3I@xidM=^EB^ZtkH{$C&oEAKAW`G`h-#{-~UAT z#nF^;(MSWf)A5ZLvDBPb@po0?v5|aIpFR}5dF#{2@~vAr&abYJWWn^*dIo^YK6qtC z3ue6C{qG|sV}lCn5B}N;_j6kj8JzANEXWdN6vl~P6Vo9(qqo;bF7A$bE3a-vl`vV! zc{aG@rmrZ~PU~`o-_4h7Sh{ML758GcIDK?b({gChXZNjE*`aKZulY+38ys_tRw3+} zTFgInh1<_aZ4)M z=h^AF$cy;zCn>rWtS@R_=bfpZDL_8F?e5_`vO_497N9@P9Ou|7c83h+)dOk~O zm4>O81L8`UAwrQ0w?`@DJ|>S2rs6DkH9LPvS81wvHM-AkCm70l8&22QwN$;K4jrF* zZfc{#Y0m;o-dG)}VlYtn=Ue~6%vI)$y}YVfjpWtb{`vvkf08#(M;H$MJVoye@&T2@ zDVY%8s}4^Eul8uCPYYw;Qs~^%{nOr<)cxq@kGQV!@>;C2xf;fI=G$EUs$REX>0r;F z^%R8P&BdqQR{DdNxNgx!aTyJbY>rmxBV%!BWvY$hNVK^5^;lF}=?s>-3vxEb{D5N}re+*^_|wkzU5g4E|Lkd>0| zNc!-LRdK}g8$^JmS}-cQjSugqe!8c*p(b9Ab*Um-Tyj|6GKhGQP4V^a_G@%kM>KQKq} zdy<`6Wm!8M&2wCx&V$|M18Ctc-)JLQS$E3)gj<(pF-$Hlou_|{h7S#+O#JB2ZTCwY z=ehCj+}0orjmG#Y+7+gJx_I}pGu!9OPVRNteYS@}DYgp&r8E&Y*kR-KL68Aw>iuYdnC#&VEcSo8^h9Lth zxJCb?y2r4{g+N(pakG3xfrHB&o%y#rT(7rJMeoaXhF@bz=Irs+o-GCpBKE9rjqA{V zvH}X-B)@uOd#33t49_Kl<>r9zEz|MkJt;kT*wTLcABRjFPR#HQT^y#0o>@OBd(Ggf ze4Ky#bdM}+Vpzrv+hySxb?6P{tHsoyVG*_RP5*?m7Pc2`4CObHudbVLiN|L>9DaHS z%E1!#@|g+uAKkhrYSroYKYS*RkgSqZVjwE_25?>fBhzzloKJ80sgVD)5$VEPe{mmz zGIs`c`J^P091Z+>_(YOtpBw9p$YtDy<78yqIKn2Unj}C=K$4M$Hwhhd0jHWb)asylZ-vsxG*Njy&L$?5f4zM+)p}`sos~= zxSqxo%K@?jHW3qZ7FC#!H+XL>JyQ3SG98}QgxkMoy;?TjhgOUuyZ zn>r3~7ALN{x47tLjm_be&KK2^>AA1+^|1t|w@9#$Iad$N^MGX6u)b5K`+f0LfBK2s z&7GE$de6JtSKF#`pS+&mPW@xh_2%>U7LhLu2lw$ztL}OfOjxYWx6PcDwLsd%pV6y% zj@+4-F`L&&o)p+pw%45Z0@d;tF^iH0nS!tcy%%Sh$8l-FW7aRQk80f@zJ7W;4H2{hbe2JF9) z(#<<)`ti~K{_RwUlI!6p^(PcP*bUfb=dTkf_WkB5pbDo$`oQVz{(dnJou`+A z?&ALU7@v$oBx~bJRv?u-xoZNbsslN@MtSBiOqt`R=bPVm!nI&44uJM@foVg7_v*^j z1(4hQR$rK$H0Wz&#;EbWeDecIapn$*5z9gS^e>s@Y5H!nxMYlGdik}vD+@Q_Av+J( zeNJfR&*fg7eUvUeGiu?O{8wbCW;qs^%=Tu{k0$+U3P%OPgIk)k@`ekvq zz*OvEflfhaJDvdlVKl8i6Hb4nhvQHXe-m<8Y<2S5ecArBGF$9^%b7r|;XuRYE3uCs z4Qd_s&VHWxpmn2)AA!*hu3H=A1*z)ll-F~l6ICUuO zaZ*Vub~ARlVn_dM!cC&L^vXxUn9$ z$$AYq;_olHpKWXzBur6E=+fGhV`H#c-^jKsVvW@CC*@bNZiBskE63B)wRUe;nL(OT|=Wms28AHwN 
zYejW(&H?PfEyQ|1Ip%6`>&?&uVdp8@6Y!l5+It1?k8g^bC!=T(=<{`UAdo@Yx>cEG zJpO|`o20lXpZ!b#)f?n9EKACO=_XP2Xm`K9e#)xQY_h=fzL53ym7ZR!(va-Q1as|! zNs8larJ`DZF#D&cLY^#FZU)f1G{-v?_LHrd-i1{lm?0Jwm%|rGkHD ziQK`gQ*p8FrDJCwOZVw;QrDYwdrTdNr+(>Z=KF~{d9_t_W^OLT6A!#rUy5Erp=?>E zd%YI7gRLU_tdkVq?CI@hkG$g5-iSA^=uc{RRW?}Y=UFa2UKR2>2r;-;dvvq6KaL&% zWQ!E+)#VaQo~n*WIuCgbS#9K9$UffdB8-!TSetC(=YfON)hRF0$78AiHB{zvB>+wA z<`=Rx(%`!xmhZbsO!k-%qLHL=7{C2v#n~Jn(m7JNp|}<MYzq1`B-SK>Hvh|D5y!2VS3 zg+2-z^!oRt38~z7ZvX8+3E%GntpJrlbJLxxzQFQ%|(}!Hg@gfDEgED`n;rL zJ)C=KYX9z&vaybCIF9XW^il@f+iZ!SZ}5x-Ej$~ueRfYBMXIY;tPFQxRZqBYRzz)< z0zcYPfTG{iyyyd1-MWL!x2iyjDL+=Z(tQM?8|tI?jvYO+GxW>H1_i zdZ0_UxTJ5fHu;7V=<;zTs%go6j}88*G>G!J;Fo4ZZ`J;$ufn*%JpMG(BDwjVo#;C^pL5qJ_ud zA2T26I;C`2$guJ7t|BUg&0fp+B#%i&K$)(bDv(;330ZI?} zX{j;=F=tmRJiPa)=7_Xf3cM-<1j#jGP8+FCZ`|Avno`yA)- zW4o70ukpvjyxS)$r{tMZ$4u!BNz~tj$9}y$7SBK%Lbr(f?;01Z!6M1I@xYeD!W<-; z;GA|j_uCUKS!LK&zInf!b!OY)+YnoJ+XG+q_2R@V6@-3#t$;PtyJgd_E~NZwX0^5r zT$X5TipbRqQojTRYidB}#f!=SwVT~c9f~xUIxTha7nw)rJgpQ6@s2lDJb4kqw`m4m zy0Q}ou`1EJUJOAD*+RdcDZlm!Q4qSMC~%5O`#e3+^hJk1T=>(%CC$8}u3>YqU0OKj zzh#3;Oa8Fdas>H6%+jPK=5+XvQ%d)Ov%b}$bS^Qnd=-*v)1yI zzOW?l>U_-phPTz`59)|(#grO~>uzs7zQi|CMWv}8S$E&ypZ2tB<#{w9GI@I zC(=L?cw0v`TdT8OQMpm8>A!Vxi};lJa)SS~5Y2T8 z!OuH1f=b-pV z+Ng<3L)59EzUzV0z`ju{5qj*iv+Su6|9;7rQ0invmx8N$F-*|ZWx;uDG!n;p>&N=+10_3Oy*J;X?C?KxCz;gtG(xqdX@-S<)>3=z{`w>KOWft* z-+YNPdA|ShiFoVH6mBsb%Q0IYc*k~dY0(brpO>G{&c{c~Sllv9{OvcvFRA3|)48^O zyc%X4=fL$H9K4~dg_2V|ao`#UU%pIjsLcO2lRj5L-s!%?@%A)$wH3)dlhZFRyY${W z5>^v&@5{s4=Usblc1Kv7B|XNG*DtdUc5cK{x;t)COjNNc!?`LVH`^WTTvfn z21P{4)fEm7NE^An5*WN7>D?uFwG04~y~7$;lu%}nni>g2PCl@jd(YL?940>I^Rz$r z?84>8QBxMDUpwj_yRdU;RbT1Rql<3llW=w3RL@JHpW*!5Q&fL@YE6s7r^{SD!p_|2 zgDUcpmQU16Tmw_QcjL{;K5laBr^pyT1*^~2ZLi3ogy!ib9Ur>yFM_W9&9F9v{l8Qz p{(kGc5B|RuIsX4}Ti&Hp1vX=@E+eAn*tryks*>i*@)zcx{trZvQ4atB literal 0 HcmV?d00001 diff --git a/Deformable-DETR/figs/illustration.png b/Deformable-DETR/figs/illustration.png new file mode 100644 index 
0000000000000000000000000000000000000000..4e3d3301b2213d218b5a0c1045be3335a08aad1b GIT binary patch literal 272167 zcmeFY^;etS+BI6--CEpTibHWLP^@UMK!H-MxO)o}En2L&6n7|2aCa$E+#P}hf(5?x zdG_AV{@ydr8Rrjpf5^B;GIFo1-0QmLn%BJMig>H8h=WOv`Rv&<9AzbWt!K|re4af+ z1fZkBKjF$hwt`;}T(uNspH+@i9>CuqTgj-&JbP9Xi}hfJ0)LO;q-5aw>=|y)pBKV_ zWAXcE&z=gDEK*Hp zU@tF+=6N+cg2h5{6D=)LgbV{Mc1*~i46~|i!uNyI!^+EBKS>W=cU^$HZuUgcVNY9N z_Wh-IR#8@IW!8ZzuIv82Kd|bkWY4d(^oNNn6t;|r8`iVoBtrRLm*q*bsgeKjnUjdH zmp;Vh|N0*Mu5lb{QqX_j``1XW!C?wK|IcC9|99Ap<^O#MJ^%X<{v_MKA|XcC!_)Ic zX!DTh8;9ndF5%DniwJo}P0IJuP^)pF7W(3690cxwAR zQDy{)6U=Ab{P&kd|DBpEXHQhlA{iD*GFstgsp(Pq@8e#!kmHWzSXTZ29O}P*cW3=y z(BRhd@?ImO6KHj@ zkV3Tk4)nz0G!lXK4ydUuGU0jMXBqg6#o?AlM0=Sp`U&~8alAyKQs^RJHHZjmRFkK+ z`3HNVsUABRaKl{7`IInuv0YuA6RTM_og9{z1+q=mYQInHB^#Y~!>r0#D}hDRkhDk> zdsB(|BjnSjC+zY*G8t{$7E_kf{@^`&Yfw(ncwWjM98|=6YJpz{*n@xI_q-G)Q;fI` zdllBxT;nq17l5ZU<6llPvGFLTZrrhX(#hMAIwcPVTD|?{*`7Smd=K_Ug zBQ;3mG1h*}`j7vN+C*CTT$;`D$Y%9$*q@H#^hvb3ybZ(I(qv_sE zKPVPT<@$7ebv_fm9d^FHQUw_B-E zOnL`GNRQXP(|H|@-DV22Cm!9aD)sjrx2mvi*%7@%KJM4B9&dK+B8V z<<>x_9a)~svHWxNbKA~#GuS^ZUTtU0Me1ebzOk8UppH&G&92F!@gC_Zgbo(r3#@)4 zKBtZpp9A4}ihpwDd7X{}&l}PH4O3xC+&z8EIAZhXyStmLya8>Pf1)8NefQE=y7t6^ z>xuPL^4w?Hweka@BgbHNGaAcif~uo|(?C!7U_kqX9^?@o1V^5Nx251lus%KBRt!8) zkmd>vU)yO*PeUT}plY7$mA-BVMO1+kH8xtaA_4Z_iJmC-(mrTfk3XAgP455RYPvT6 zYU8(YRpWZsU;-szuPhdCP?~L#XSa1PZ~#aXGRrAZVEl-%?EEBJfp&=*xkueeC9zj- zO6Cu=e{mbok+FYb8hbkblDch=VbPiD0uMc`9Sz(K+q=;JHUqJE38b(G39p=!F6TcX zefHoPg<5rZ<3hvjOQ3Pj9lg)akW8sLr$ym=?`2!a|9fC0)t38mzd|b&-*G`IHTGp( zsUrcw_`IvDPb$1-nbry99XqN|$Rc4|i9O>p;SDG4M@%%~AJ}ZQw6>MzRo-`mx%oxDdR6IBB%R?~|H9DoGr$xVgYjrs~LT z&zntRf_s{AU8HLxp5u|VqFG)1Sj2rJ!61=vR=snFypo&U)ecUO8F(!!k4;Ss3J%%i z@|Uf1ymwjLX{We@DomBBx&QY0byT31KvJ|Ai1*cYO)f7p__l-QwJ)-Lyxc_KdX4tz z44&Ba;{kx2CXwOKY!wO}{Z9-bm}W4M7U)}`scpJ5s-DB|PI&V2y!pd^{tH!mALL+P zA|8WNuARS2p%hw!=zX6k?}|lL*&%!DcxQIs_Rd96mC1&7-Vco7pJdH6o9L+?1V#o> zRu18$_53i5m7hUg%JD6Z7#UIN4(Re)u&Rc(%Ik(xdwkqIfGtUFiyeG=u2X@kz6@i% 
zkf@;Gh63k0cqDfH9VpLzY-S+tu@^Ak1=!&b-Ch;Ab+2_eM7*%))cpx`Lenwo+dBvc zrd%i}kOu9P#d6X&%1|lw4t5Q_+xfEqmw%DA|4yN&&H%G%SeNermDD8EVu8|L^l;B> zUGY-Wqg>#j9kVa=|EPm5X5iVd-~#iX!gNUq@a2D$aqlYpK?+&F3qQq&m&Y`(sO2ig&UGq z;>AZF$hE%-YSzq7?{*FZtfB#yjP(H}etg83O#$4GM6PDl`HLP0Ty4i)N^L?1*DV4G zd?C1PU}ATZen|U$iOUWc%0a{OhaG3x{g0U1?jr(ihI#dJ(_HW?;SSP(bQF?7lhFKI z{9I>gfz*QTuRiv#gk|k7bDoO`+;hoOgG}$g1s+xuOVOFAO!&!Y#17*c#=CmO^(JII z`7FcMt8HLC@|~EG$zjpM?MQe4@A`I4bZ zN@6XJ3t~0alhSMwoYx_&&OrFXwpRwKqnt?u6^|(|c=9nlppM8-0IN8ERZ*J(QCDY} z!YwcIZ(P14*lPW-9*BgS@hyOAG~22IuXai} zjrnRt}c$vR`j6^_tYA0VbE!Uf2Mc08z2d*75>_6h92L7@%5wDC*4b1dkjH&sm0~491*lS8Fg!^M+?ov{Ca^wzLZhCA7GWjI@RQhH-OMq5|QZOH@54?nl2CcJF>Ff`qptCwO_9moi?-LdOPVpNmWnY z?|!}Nl)^l26!+3kxw)V*BMhIshZ;ngL_X&5!O7LvB@B)L^~a6$BG4RVr)iwXlE5m;3;%Y#kb=Oa-J@xS`TBdQvy6S3U}~bZBp;0p1n|(~JsbZzXx38| z;4skz$MTFl@;^oEU-GR$1wfRRh*ojD{5k>VVZ49$k zBVv(L`VE5f-U0S2q}>iDalq74=uW$^t<)zs_4Vs}$tc&){dMAGCz8gVL4YuLC>KD( zZfI{y<%6$t8jz&Y?sp=Rk)($4-hA0(_L;r*32gJ^_gYF(>0qRyMcfv>a=gTEwjGz2 zF&mzBfOckcOVQ*3{1lDVA$HoVfcE}fXn&5yjHSd$?6&XPm1JD-(%XSap02xb`}He- zXY@U&pr%_6k@07RO-MYL3!cn1c}pnT1nyg9}@08Qm{nA1I1z`LzZ%?JG4G1@9OKdRqJB5-H6oX ze8~C^qs>H1p*;<|&y{^z^2r9@eM>pMKj~7E1m9TTW`!60&K3>O{qY5-yyHaEDz5BW z;L}y1@d5S0fJf@SAeGTXy>_7a>LtrQ&)d;~v1U#FeSf8=PRQ3Oy^z0Tmhn2gr!@jN zjn#X>_$7uX%@1^v|GecaAVY+frT!Y(GXOI=kS?MqYE@>s!4&d+r{H=K(zCpvx5cTx z>O!HORbjg%s-*F>Xmr}^Z^gf2YmgtQXAkWi3VWGogn#);f7rn3u3fDDYV^{2CD^5Y z=4@j$cLeI1x=*q+l-Zvtp*u`rjZEF=-*_bT!4$sE+_Z`neA~BWu!Cg}_D>Z^M-ZA= z50GOkSvKrf>(5KF`S!!ElBHguQ?clx=p*`TXdF@W7wmEJJX{NN5`|0Po)`IQeb1(rY-pW!tL@vXjcG(xZ{it8%p-Qc_iZ4&xRH%o z&Hu4fq)9cOBwzLDl;3*p^foY6tAp``EEeEuY=|1EC!x4psh8xld8$EIJW^l>obv61 zbO?{$XCz*>+#hzVDmE*cSHO4N;F(s~TeMMMguM0v;(v)z84fp3F>tC=Gfd0o<~OAP zW<&nK*%T|^>rcwLe@SLdjxi!~sI>{K*cUu0v!d#PPh#%XW^e0w-YT89FDVpsOUDEd zBgyceKXM*93B*vb=)sTjoEH6v;&gb$T+_7O!$z3+de&v_eCW1u;~I*WD|0F4uD=?{ zf(uUfZQ9qJKOwbW^3jE4dUX_V7g!hZhCrh%4or?7?Fy> 
zkGQV$v!%Ec_xve_@YGH{MkSK3PObn{GazpxfFw<-ej6fhOuRG&lmrfCPsp61Vy0^xkGO-KGA_$2%{xIik~JC;TdbC1nkPsi|#W_h&6R?2V)X>ZGNrJKF6EpbDpU1yjE5 zcUJ**O)ReceEscXN4A^4{jhC~!c0A9#SA*u?%I7g+GqLhgnYnHop!=!hHWn?nRwb* z$vO3sr1N3d81o2xZ&U0&vz0B>J~~!-;X>Ek(N4W)KC!H(?<`3p_{dMG)ShOcomtD5 zT^p-xjjk}W*+VR0wB@yk$uV}%@m4PhYMxN!sr{mCOwcOUi=2dEJp-2VJk>ucpwGr% zXjsfYmfvS7`f{X=H3#iWOncOml6#83)8s-^)hGb3M)g!?aw~zmzKe6QHTa(*EV~a- zdCh3b^8m$uf9P5+N~|m*u{sT0vpk)7HdRK01#0vugqk|S(u_~3BkI3GZJ-O+Mx^4> z^M_qI=~(DUb|3 z6v~iEwi?&MX7&(EH}%SwI2h_rUQm8qp?$&Xb7jHLW#vSYH*Y*;r?Y^&{&K3UBHvJi za=9{XH4s#9IhxSyXgG0whvFM}gsR-JPvr(irHO43-nDlZJ-|J*YiTx zfihDUPSC}yCJH=5lZ>$161##Xle(WjLp}WUf^;cPAJKnf#zC2j zAiJ^@)*AlR@obrbDdSX`28DDHL1%NI=$CJd#14Vzp9{t<6&#BKNWrh15gXS$_3RWU zJu8Fd9ZeO$JEF#rYn@fwgsTgeOY$g_e3a0+WSLPK<0-NI)hodBGRt3gJnHG8GH@9v z2rdIv>S9mb)9dV+s5yj8IOb1v&S*%D{h8hHa97}mtv|C9{6{|;NC~(tsV8pQiEe^j zFR2DT!o{YmozYDV@KLG85g90Wj(}1Va+q2c;~QeT=7Pp_wj*B^@DYZW-%1%4vD*IF z5cbn-zW{6qY;X_e*2q?z5_|XfZpyA*l8RG*p9Tq)W@6`bEUHV>g&~|CchB&P0@zi9 zK0YqU#(JNgGry-=e-!)0SCbgczSPj$tAz#p@(<-b)zKK%UY`gSJ~C9NJMAxU0V!YQ z6Bb#NN4a8i7tMCs_?%pete^X=LEyHZ-UNVhvTNo}mn9;_4oexs7TU3A0OngwD=kd9x zA#M1y{2OW)cpaF!KifDpuYLU{FJ+e^sqiki&n;pR`ceoc(kp3~X59u$)Sz5LC2v>M z5j>NgDg!)Q3nF5n`K%G>rN6GZ?0FA{M(PA37)BKv;HaL_E7u#Z<8XqoV)?-Gk?j>htQpvrHj*jVcp!qUZXmMb!yMV#V%~Y-{ zM^vjdyr?GfdfiQ*k-NLZiQ;Sb34K4d_$jvuo>|fjdKF1P6;1$+8FpmE)I zyS{~-o?c5OMi*X1nWTM4Zv92SAuyih6XxX)=TZAEZ!7bM7CH1FkuYqDABzMLI#luU=lB!4!9;j72n%*@QBAhtV*MXKZzdLfVX7p3LO<$W=}f4|K&XY=qzk2QX<{|5G%>D*Xs{ zSq9y+mw+|u2Ojp!b20kB7=8Rh^rkpas$!62WOj#maENWNT_xnQWrFT6;kw!u=|4Yd zhfwc8+q}f%_q5sJk0NIpY23qZ8rpm=`lixTv$y$N&Kx?#lHCh}u0{#BpGP-c%^97m ztwGchQrfneisz3O6aF&=Dn>vROKE=WRru<&DF&aUb5qFCIz@EJLv5dgFEd9Q$>PH5OtK75*S{k<;!mFHk%)BHli+17xbJ2(- zTk>@6tp%gh?#E4m>y!`GV)MOvWkE?1UyU6K6FVVZm}ADo^*T!UiA&_MUU7KMZ{LKx z9V&{SDCl(-B|tYIKsO>_q}OxK5K{ORTCF}B@F`9{!}M27PKlbQ_1;xlB$4UF&&O>{#PfgkP_n9i!ekBF@cu}JOVa+JZ^ z>p@a)novw`EJA2L>*Ig+!VhqUg-g?OhGP2Ie;w#MYSMo#yY=%Ux(re7kJs#9onA#c 
zF^4J%eYTeTBF`54zTm4{Md;{zq~HC-E@MI$y}z(8D!4_-v(sL`9vu-dRGLj!QdOll z`7Zra=k<@b(jqBxtlwm5lkf+~dT#Qw585@rFdR9EOP~on)t3KBwfBXlZE2#1e8GiB z#x?;`Iy=z<{gAwydHKTlCqaBB{_H6A^ymp%EW3RlaR@oM`KE3inn6d*)PuwOsq7 zg4O&yIl1ouZKsOaijx<;yKYn`=7@ctNHC7RDJ>#jJ1S{0kLC%V+=SBkAfdLL=_12C zR6C&aIG^<-VR6jfBwcZ6pOw(5JMcWhaP*Fzms{jJEe@=EM%_}Q<(aTdBMA5B2dEbG z=(88q%$DZyGmL|PUE^+QZwDUDjWAb1I|j#9qim!_J1$z+dI_oA{gd2GlcY9E*zWou_gu(t>g#8OPB3&3UNb3UAt*eYEW>IjeEWcm*bXweBl*toCYdZ31hQ})y zs5F;m3C_t?2OQhpv^K?l^7gi%AOs>Gu^t>@*-Zl?O^Z4UHbQw0XV1DcdLPWKnRDW1 zkj$IgZv}vu7Ba>J#89SGbdLf@W@bxA<8Ppn2zIS{a;>XZ(^sf`-_+idYlBqPBfQlZ z?Qj@b4z(`&`;)eDcJ|F2TehRSX=2|UA_cy0)4;+D)DOUImV$TpMPvSH#PIPeH5^j9 zf0vnYuM`Nx*SQ_3O)-lZ*L0DHq94D?Rxb*E%Nl|gc+=B#IWBe{3249g^oyx|&$k1w z<^?K8`02ejoY?|# zg1B!O%uRD&Qc~lU3sLm<_rI%O+;C}$N5A1sip@`owx~?)9`-^#niUtaMfvzEY9WI& zmtVJUl~VVjp{8+CuN&}Y&BkD(Rct;^)~iCd%eM9F7eidEZwjM2)7Ah!>+qA_oYy($a87gTQb80s zCXlP@wIaXXjKAr-NGPU2$V^4+YQpOZF~u}3J&WE>2q$p~Pr+3Bni5y66IZSiU#_n7 zJ$*W7e#lmBG#g)sfPzNxr@pK}wxUS74C^;}S`uN#Djw4A3!bOcbIB`->(~p|L6|q3 zKs#gqr->T(T_#U=uXqGIt~=uN_Z^$s_n7y0=tfcC zUD5ePO2@;eU;eXYrqNF>^MO6tkGE)N8c!~yr}~7yeTaTXyqZob%?Z+PXEqXv+XosO z|GMtR5Sf3$#L?ZT^9F@`Z`BvckgN0_%t(UF+g7$kWPE16O&Po14qb%=?9r z(ULxK&rT9ad3lwEGIr6251RG@ni<7(fXW=LG7Ecm-FC&XNssI6Yt0gMrM9$VuGoM+ zk;30mfw4IT5X>OS^iUtQ1Y!SEUb!r;r)i2_0o$?3q^H)?s+Z8RN0UT zn~$Y#l`ato=!USI)P~9CBc~J1QJKx{QLwB#J*PsbJN(PUx&?q+ZY^iylu zhs()_nA?6|pfGyymYes-yvqx6sO=2e`rONYwF>|hR)i^&pTE|_VvF?!0CPP~ZWBk8 zgfY-vHbfbFrnc660XI;;jRw5}C{KGROS}8}<*q_FyPr0}>@ZzQe7CS6C%qoK^5)i- z)_(t+kW}Vf1uPU9>Uclgcw^FykL0;-h+Xfo5fa)W&>w2>36n4T?-O$-XuJMr@JvSG z3>h8vsKC7L`?z(%T2HGoNAO0!0Gxgp5Ia}eDakus!=AY9BvVVCdDf zw1?E=bnWty0(Pt07W+3Z7!B+AttZCv-&WFUvomC=D)Xw&_EzW_T{E8O-JXsdI%f~W zTu(|p9}wWZOC<)$RH+eO94rtBROjA#23ii*%^`t9O*xnP*u%P>={mh$$51fy^fjv& zHS_d(*C>kb*77zsj5l57T}fiM?H6_*S2*C9ml<>?KR*@VSg@I`7M_W#_4M>1ysuL` z9uulwOT(Wz(z-dtEVcsQ>AbKD`r?Cdsk6cn8f}ia6@g!CM&lBJZq7>+)WsVjs~4gK zIg~F)Cz9v}sLoF@Rq5+B*iDvs7A)x2>L?r6ZPzP#bHwujaPV@tEC%&~>n)1w_>3r|^g&4ksV6Dc&`b9;uR9&vPd&D*V2AZ~} 
z+^{@}^IoExOQ|Q|V;@emdThvy z@>JzbrS;#)Q`JQ=ItmL8m9cvt_(p|rR%o11=*`Vlx3tqGFLmzD3F+z0SGPCQg_h}? zWDYF)P_>HLmsjY%bu5f$>(|U2&1JjZ!0TJTQsfo=EznM6vvg!`RHhL)DpL7%i{j12 z-Lcjwd*Z%sEdXQvz_%6VJs@?D|6t+^fcTUH{cvw!4o&`y@X}XA{7eKG?jvqpM5Le7 zfHNzeT@J(5eJ7r*<>oNGTFydkadqa-?iRvVA6HH44&$g6hE9z_1;F@|lUECR@{Gekjr zX(?Fsv02(2Kvth0T7Ba0k8Dkmw(WjIKPQ1*q_XjR@a@_Wods1IvndS;CNEn}>w;Q*(6lnd;UDR=*EQaVkowQBaleQa>QNygzy`;su>!*Lxj|W#S0q z==a8>+9a)7hqibDmorFP=7<$mFRw6@VL=Q5s<1wq=ir&{H9w_)lawr;k2zBe^ta)(|+nHVh&?xWc8 zZC3Agnf2y+Ur|t$nu4#BGdk*Kx-KpVcX#)A#pY#ejO8s#>8~>fAgZ`E#WYxSEQbMj z=3T|_JpwRVN9y`{32Gzf?Eu(>AjZ!qE#eG>6XKN-CZ82IE*FsXX3_MqLFOGotvKpM zilb0^8r5Hdo`u_U}?5dnBAS1`%&rj5;EyLG@$@uItz0&EugZr;(vLYwV1t zO{YKMLTzAT0(W$E%Hc_bnBLc$_TW9P<59=8@OTfYoVPrk`8?b`-I?ADvPgb*CF7>m zWH%Jc`Y{OZwJGKe-!F)LtF5_)weL7=AHI`9|A*n}uRfk3%Vz zES`Nkj`RIGI^&L{>2+-i)h%ea+1VVj=f4Za$$gdWViDj?s#2z+Z(!Q9JD2R2n*Hdn z0Jr#5kcO+I-6Y9+r7WOG_(k`Z^!2nXkYcPEfQNG%nm9L`ncBjPbp!)9m^l^~?cQ)XSi6Y>7xYUFYRN4Z-G(BHDsI~C& zLT5lsnMKS7zN3<1JMNL^{Ny1NT2X>I$Ks-2@!@4kw7JlW&i4z^L&bPA=8qR%m>fyO zb9SO1HIiynbV1s>#~Km2ULb~?td1xm?~h}G)g63MurJAe;Y+8}*m^3WH^vNS_e$67 zKh8*yt)COieY+6^JRDv=eAc|_^9f0j->0onNwq9NP}cdT#8gs>rI%3AvVTHW=TC{> zh~(77%}(&KTRAn^xfz4W*JjNP`;OOJP41OOg!1C`S7^uFS%j;gMV3`)<(GM3=1?7i zk3=Ht7)=7phBGZ^ z`+3;QfIYlN;c>rP4s^?DRsrVt>~aRvDhy^HR#o=MX!18>sKX~hrFE`aHwg<&rM<`E z!~Av69W|mX(+6j52NMV4xp(5(-^Y&@nO*j8KPp2KMz@xYU;N7UY|-l-m8Gb=bk)~4 z==QKOn$9gXIrkS-x*%K$E1POEEEKC4$x2mRDvp903w)PhO_pZHmt8rL5hYCib%ev& zBI!t}ZGSu>kteJgG`|)$@j0U7zM5{LSgOhgpF*Yx++mL`MAI^jR%mlAP0@xYH83DcZ}E^&F9*w@E(e6@BD&Wn zx4#tHK(oj*Wa7X$Rzd>pBqAVD0kQggQ60 zo26BajCgK+l!HaE=lgmE^CrlA(}-GRSx!d&lug$l3SHd+#Yi zX`aq$1}Yj?eWVWq_UziR90QJc)+ZjpPkkxWfs~K$Wg}Pea^3=5gaKvrVw$fTUd4PY zU}Uu=W)#gcln*dBE(a(Y=-oD;%&=-qpeL?#snz#BpkCBt)Ku_IZ=w8%H28t}g7K0K zdkN_x)dXJbD>)A|d&?#@Fs6*cyaCy_BE#QwIjd%RW{d56NayC}YgP`Frc~AdE&GC4 zYXRR?D20GwyWrzvb8kzMZ`#_andyp3ev;wx^pRIsG{P}ult1a^^8k8h9y<>DxFu-- zUW>yCqsNS!K$IHJD@eic>Zb|j&`6UZG3HPUr&kr#vC-Y&YWhuQTB?ig4>j*@HWe7f 
zsgKoXYou22f7S3yL=^ZD)(wR}XRa2CAMj{prfF{whL>Vrgs#boYBZ{>(id2MbIh&T z&rT6kdG^-ckC=ZiAiWj_)NswM*{Hvx&y3@n)<~7FN^DEpKas&SiPA&h9GAl$8x%s; z^25AiH^g9*?t@S(dD-sE&c>C0P-;&IA*tG#jH?AImvwm9?IX?~O#i6tW%Ne5<8zUy zY4^@Hapq*SP@YZ;1Lmp}yV5=^^1X7&D8!SlWTZ!cLv1#?@ZiEObPwLC9T5Fj?*M|8YtbiNbIa+i zp4S+RGIH;{ncTQlXGPPWi6*~yMeE~8tPe549Uptq(x_fUL~}P8b_S=;mvE^yg(|M4 zHv$~M^|&V%NR|BnDOyLriL26mTMURN>RFaozyy*H&(HGlB{{zzW@59P+4BCOA}XU7 zr%B8|7)}?@wHH?v6Lm8CgOyt-omH@?*9r5>P-sHGZVj+T4S1o&b1&T zeuI8eiQ)>qZuR^~3BoR{nFZ4{-Mo}iAFDo}8Afjw#>^7-1_~0k&n2~?y{36 zoI_UrE%~E(HninfFoZ`LNHwnzy}GmQ@e}5a;Y|5vF*4UtAJY)Y`?&CYM0QWgy5%-Q zu-Gz0`MSl}e7|-uR|PxM;e1hoeTPT)!3VpTTz@Slq5ln|(H}l!4(CI$S;^UQrxWnw zJ~6I;yAerXic>Z6RfQlJ9ddFD48b=A?&0YFwYbr%oI%c4@A0zN0R!%7JH4&(AA#F$ zblx4T3*jtQFGF81yX@I)H~h57ou^7G!syOA0THlhv==xoay6akJTo%)%?!n}I&Gav z4cOe4dp@jL;Nt1qxWe)n_Vi0RBW@(4^8}iy5Hk+UQUrS>39L+yk1(d+R}FDh4Gd|M z{ql&Zkq9zg+WV^$g7*Rz_E5K-0-y=2JrK_;Sf;QyR6hv`%5Bi z$skJ{GiH!E=J)gRWru(~P7cq@dU6*c!S`jl6}rL9URXAauF8yc7&4R*s>m96OFPbi z_l<$A9)%fY5s&_|#ykhtd;Y!nrMghc9qb&?wVf+{0k!2;zx*d<0q-V+pCC^j;+J6(< zv;q>g1?98=Ryq+T=dacqdp0iUNwK@^Tsv)R%vIktw(7~&>u$y~0PF0FMsXYNAw&ChW1 zo!8j0@?}AN_bYJ`%c?u+6}7VecmFGXCCIe@R24{Estz5z7oG=oSvuBnx&~H($LD)b zGiyCB2*!;}O&z<;lH2bD{pX+&F|cO1wdR|8(fN=@03q0KPwaeU4Yu;9DlG+o$e&Ke z9Q(s0@65#cT@WJtWFzM}Wk?zee$swX<&AK|W0NsVVE^DlN2ju_t+X5dgO)BFD=!FL z3sv*Sh4T+AbDDzk1y7l`i9aH}Yck9AC8Onq=)#qoIh4cVl)jw75!jo$%6rW2XWg!` zn%j_yF%O(23csG_CzKKRE-ylz(sMQDUS3f(B!v5`%u{jLom(}k~|l>BWOX| zj9kz2dv!IrHii1^bwX3%i)P~+ihkZPl)gP?a_LE7X1WrNDdej0UWUJq2Z37UI6x zHm`2YBj*_;9+Hf;gowkPNnT&&JAO!Zx8K1XKV#EIYl`6d{XEmM%Hz1__pKK~XxZtT zta}npFM!M=TVABl0~g(00K(p#M%iqFRKL{OXT0?>$ACKGY?xNc-O1wu#aj5SU~8M7!{5eD?k{e9vIjpo1Kn~*g-GcsbJ z=&mN@j3wmYFnq_bK~@WiBfQ*cP{Q0RaS>*kQ>M>Srp=0h2WU03SFjQz5+Y5O*CPGI zi7&iO^pm5jIW4sVS{e37XuE(6yu19`z2VwPf!VbM&3~n-U|$<^GEO=tJXh1w5Ty2d zSjLY9%fmzI!66FFOV|#?MK{$JLknUA-`%x5JRVh{GtcozzNZAJ_%S0=a|E?(zNH9` zGfe@RTPJ|{!Tv>bll@Xq47Ees!E0>tv;``8+V#$bRFeGs{O9%PuQ{u+ap*Ry&G^VW 
z=j0LWcElUV1wYP@ESP3=jV}{794fGRKo98beyx7?xzd_gy$;D?SK!fo8YaaxU!2oH zIr?V#yUf$EFy{BUxZlM>u~jv*w_5EyxDkW$3=LjN7)s~wM__Rj^Xs# zY4QdFbKj(^Q%uHd}#Gu_vYJss) zO%5NtwcT}BZOWZCG#(2{a2ZHrf@=JV?9lXXwkm+61M1rm7I^RYbb?T#(ssPVGobM_ zIW_0=XyZW_K!k}A(RS==51E=B<#9U|P zjna51fku=&N`%|9A)RL!pULl{oe7)H6ZM5i2;>P=Y$v_6R5c=DZ@M_N^t2h~$H%s=dz-ubGYJINfUn8h0t!AeqFo2GkRI*YEXbmK9#^CDj4 zNmuUpPJn##fVAcQp{A1CYvu8DOP{~$7;iq!7xPq3gRi7xoWYM#J42i z)y<6Kkyhau{F&m~#$2Dv@n1D!1<2r49o?&|%(PB*?Rp3O{PuQ%>|V!w#72JF%2p!1 zCy|UHp^S6*Ngad4bP86%#<(g+I*FQyyBJm7hdAcv8`zktq*LnNtRAv{rw|%h4rsRP*N|HruU<|j2BodfhJ_w8 z{k)9dOds!alhoQL^q<*wI}s-~r$GFRk&2-GbJ$|wg)XqU8Ij^xw>t_nXAb=EvG_H2u7P)HWpPeNJKfEb1=p7&jBB;>g_>W7)Y019v zPZiSv%cbeAYTpXnWT1iJQ%gaac*t{y)@j7s|=xH3k^Z+XsNS4>A9sOcJKHAgMG<&6@_)6*{yCsJS@XKAlWD3IegJI;41D|zcS74!xcNLi1Uj`M z)_?V;0dXNic6>yG{HPX8asi>&)YOdG+eeHg%A`UhcMomRw518%U)604Th#_a$?KBRzMa za_>Wke8ehGw*MQGD4N@NnyE_aJzE!$e*oImo;!}jXR{>*YEsK-QAH33D4Jn)FeD~k5HNXk|0b<7YWBe(j!$J4px#&eFNZYzytes@bb36 z>{d$7m5`hU`GCMyAS%K{@l^JFTpI%LqV)W5&3`h^N*nJ%==~DX=exRl&3%}o?~9Wc z!tblrFw=Y3b}HRRQNvbHVrRY>vwr$+On4+{+ngg0R!`sN=8d58*4fA>ji!u}k#6Gwd5> zy#=xnz}a~{ipZmq(7Reh;3t#U5OR>i)ydJ)uii4-nwLR2bY86?!7cl<{e5=8htNN* z5suv{yTwop5LF0aJBW)Coym9B`*6?C;Eu71aP==z?SzZ}VC|{~a*w5!e!{T($mi@{>=8u0i z4=p&;4_Ok{PKSCFK6E;{7^e!OC8bK4@23#6^-r3 zpMFWenRIkx+f~I)UB?ao!(~bbdgDZ>DqZA9I_5Hux?6O<2H21$=H4!B5i*A zQSMO-8%vC`qcZ(9eYmFIa^q+3R!~LU;j^}i>+<{S&GPn3F=3V>dxvs!ZyOm*AR#D) z+a{^a+_$}ER)@*qtkFRGJGkfIDZ1pHU2RakErHRM^RadTk3@BaLD%bw@RQf@W@x9?RZrUQ zu4+f>hV577HM;sUOVzR9p0uUU@Onw#g(Cml{cHbR?-AG6*YmuT`n_?!2uYBO!d!bYfG&7b1vTH*KN}>=6S(>u4q@^ad-QktVX#V z@&y%>dZ+|YhNe05x;UrRTj_X|0}iW_-hnfj`-+t>4)<#Tm?lHvaHG|=?a_;+#O_)8 zGW&vkl2j(gMneE7IbmKyj@N;(^$mm_dq@S(J^v(a5^6PQ0?SZ!Q#iHkts-t;6vS7B zWjh3}cC$Rn{;RXN@po;>8T@1N+zdf<#HNC$>h81OUkXS3;7qc?u-QIb-k1!O&lZW$ zwnDPr54yIMuVS`E(~Y4n9tTEl{d~+3$tQF}_&x)kj!|AyM^kCWS_vD*P|pzmV;3!s zWj5^-;jqW*W?-ieWiIQ><6v{|nR*mLt;IBsP1CoFyh%iivy!}%SD?q%7GSrIIW23mE&Y*<6Ns^`BjSEJ8d3c 
zsdIHxn)d>6moFvgqzNH6Ms4*d#6GG?Q;OZaMQhC4!b9%7ZF`+tZOZq@e-^s;`Uoyvu^&TBp(8z=MNpD@Zh{%-j7ax zCD$%H(W_lPv7O?_y?N6|wTuTZ$)8z)8&OVdK-`&+u`-l0UW4hWT2adeIrx!pW zSLQaMQ+Aqp^C3zHX~441?J}mva%+8N zk85Z;5pz#k*%ws(ICy2Oolh18wR!MVAtIru|PmGUdMs3pE)EL&xNxx^8n2vD6vk+{Jj+(%Z)4w|~tx ze@!?0^Ya(;`-{rGt?-`U%%%-g38 zJ2MSCfLCsn+X#_)IhEO-S$`^dwv;;1vP-LoPLZ|Ci5PaLPc_h1nIQo7Z#i6U&Ov-r za#h4AJ`ZJ*_;uFonP}^gtSSo)pl@G!fy?Sl+!|BwPc55RS?kW)j2z;2!YGAjgY&W@ z)04Lf|4__qoh;J@>i-`3#t=VolV22$2^2%C||e zNfI>V&VqIWW^xneOa+WqLWSrTLVW)&^TfTr-!32bAu03~Y^6 z2h=YL4Ml#YPVciB_1mxY9lP$G;y=V&fZvEebN=S0en8>2gCxW(`}>FUOppZO`vuMj zcxIF0+On9#?Q_28{ms7{?--d9_$AOqb>a9wVqa2})=DHfhQiJJWMRg zq00g%O?~pCmwMBtH>kvO-X7;iue28l<$x%bx=GtLA;GiEHpNRaF$eLG!*S=vJ8B*-Jz9!(mkH}5&uPLCPsXKgh7g?g_g7?D@_ST|<6#nOo^sV&&H#~2aU#+q6e%A`)ydgJj-4R1D3 zkFUW5$8>WR0@9PSW5B8Tl{86TUJ?xU1|%G zzL7>(?M7{v9t=5`4; zLlK+eZnL!^sJ`fBsYflJN8ebz%8@I#2R2|<>*mh0pAISh6rB3QD7yz1zn+&3!T^gf zTqDE1&Gj7JrPcu7%QqggzB>~>YITs$$nN8V^PSxRBt+gnP-pOIzKQT&-OI&~L}s?O z)I@dId=u0LuaIDp58Nrd^EU+09&OtyhB*DgvIt|_QRe(YNBzw4nh!S;{Wb>mG6hx_A}`yV zUTsn5&{Jvo@NafgQ*wFqCL=yGhATF!2uBMyUnr2Kqe0K(mnW@~%4bL>inTka(OOCz zi_h6sxs`QG^6o)L@On>ez>+11H~R0%zTy`3Q_O^x8iVCZ9wn{VOht=w!Cs>s%%Qk* zmQ{eS5;oQ%YY#CdMHQCZ7nExar0%dZpkshufwZr;dTnzCCp(6JioE--#u?lhNAtn` zDy<34;lV8sZuotDY>jM_KKzpRtt`O{Dhx0eKZcLBUgWMM9xC@+%b(Zk?w#NnULTvp zx!t*Ny&Rg$Jdori-F3E`oXI@>@vncXRr9~4>VJt~TZ-h8nW^dNt;^BUQPO7t(oI{i z(^|l0>D~>2`HNnyM=wRsAJTiB_uNM4b%i*IL?OIrZvF^8|} z6Vbe8<$-=r;e9XgG`3S)lLw%fWk8H8v=lrfxVb-CKzEsPlEEk@c-{&fqbnI(GcwvH zCmFt_5{t|ZpAFDBE~QSxb||hw`va$tq8{w#Ik-~FSgW4`Rz;l;A-+m<}z`>UWly(K(l2xW&+nI#j;?Db|oc$48uhddV zo#@oaJ8oAq2|Ox~!X28{IdJEgI6sDf<()Vb|Lh68Xp`V{=T-T=xhxdjG3(0BEJggQ zQm`7!EQype*op@15fWmVPW9pD>S^#EuK4jXY$TZ2MVoD~_ zb26jI^c%F*zAhM}Ls9pdV!M*9?Np1@Y*bfNMa~7H^XunBq;fNam+HCcvb3r;2n``w)@+{WQI(}-?da^MX^z!a1l=b@ zXJB5ueP~DLoL8o)$y0^eg)l~?$8@4F`Mw;=62}zF-YSFdV;25efkVEw{5P28AGq2? 
zI7jWYt({%-^8P~XOZsMqs}6^S`P)n1=(i4-<4=>3PyFfKX`3XG#6>w5Z64u4L(WwR$e#QrteG(i3fc7 zVJRgP0<1}2cycwn^8_@rPYAP1*LRXJX)CPeJr4{I*l@{Yt8TuhZfNoD zf6934#UYH~`l(l`=GkX6(@DjjhRGCCf}?^EBkyM@zTMo>*D(ttj#i23hDTurYQOOG zwse#?bCSoM5yzbjHxT)Zv3WOQK7(ow#oavt=!$iVF2PG)7EsY7 z`faB%1!~9aD=2z|kK4FWrW%W*1v_TM;73X&3Df;r;0r0vAdX@d|cz`jN8NJL7o=D za^n3otRIqdf$|1MRqoLmtYUu;}jI|r_M2fC8*fxX&UgYTh3?%8$SODWtsGxCj( zKOk&j22ueA*`N0J?`@7rSmy~nUL|0|cWGq+ycchMn8e>lpe@NGWV4jx`|3s$f?<)x|79(0EXs4XSJGo4@r0){b7P-j> zT~vG!#RCqVjGGiDczmU^q?2y;vBQ{od5@s~2}5HmiDh?)RM9lIL49a!EGegZA!x!L z9kxnW7CVWK>i+pQx|I*t-9T$;Sg6j091+7nHifmhRQC<4Vp5tW|JG}bXFTkHAu^1K zpDp~6wzE~a0UmuFMXS8Z$+TsiJ=JH|y2%U*zB-yjFT&Sjlup@VD$N4#4sd&^Km{+B z_d0&(DJ1efR{A|`1F9Rv*CnbF+DBgO`LG*uZfUr$K42dvGi$ z7nSUrlJ)#V%LzaYM=>H(Kbc+(yAifFV-r05ZH(Ld^W049X!u5HGOjmoRky9&)>KTj zgs!bzQ_)`aH77{nJ% zXz$m_Qi|!ki<&4k2~cm{MdNi401L}|LPwFz>rcB4;k@4>nRC?-2M;hgRS!}9#@&#V zywVo`7$e^%8p3ulLg6893lw=R*z=N=pDsG**w<&qJ(E&;Kl$IJ7ST7F|0W^*15>P_ zR~Rz+o69mSr*7B_;_nh6aPV5DS-n4ek@u$xu&gXMUM1g#oz7j-d)*wpH>RPH<9ei% z+oJT1xhwbU1F+{|JI|Bz&+cCon9^mQWj48suyMoFgTRzFV0R$cl-Y7~P)uH*Jf7@m zrADCpuB>jdW!#Hw#`I=dl@L;bE7a*+QRG&iKsiGQWY4;>u!TlDR)5?XzQ+nzUbhAd z{cx=RI4Hc?rAu$vP_KCVPyS?6HU%^~n&)<6Q*gFG`vevazr)!Vo%=1vDH>igz0X08Vj0}%9m*MEL9uvWp zM~hd(R8V!8b)JAkF_$~9b~7Zgix112{*B9)qu7G8rZ_Hp&RvEjCh54?J5JpkIWVft z8t`-;xegm-(iS?DT4Lsw9W_n~)T9I|F9Hgj*!Tq9?s;NLdWmi^ zbu@G5@n^ck;;YrKUeAA+!`lSX_m`{<72HgR5NpOi(M9N3C(BS1D~u>3v(J6h`b7k* zs-xc7JUZ@Ro_1b3-hS-A;laaNF{vIR$iSeRSZO*lYNm^7Va-M(gN!1NiM$0mTryMrkA(7bRs`qCz;F&t#2gJ)u2ce@`4AHQq3pL? 
z-kys&4y8y6Brh#Xp(~4!WCh3&Jt}ZVE)oe}Z$3*@hsOgq zGA+ru_Rn{8E#s2&ziFu)sl*v79Snbn8Kny>>ODS=2_;iGIng;DuC9iS%J~)9qE`v0 zxKEJ4Lj3`+nSjzC*KCUs+;T#~?<;DMA)1!V+xf<%(>MUE(Q~|aE?F$St8|v_S8=Q_ z=)4$PI?CD&0h}{)`KKy-<)PbgD|s398+P~O>V$sei|?6>XmqgxmHT+~TP(k5YqaTv5u7M6a3n zE8E>R^3UkMsF5#3^C@$R(+ZI?PeCqo>6_}b`lAAM1-HK~8)$aQkF_CO3_iYUlIvt3 z-HKgc&89Kv_+X4p)!88k(rb!wfT`h!9cJs0?W=*W?am4b1NQ}L3?tsTne^>gwYX9C z%TU8MXSdy&89p$Rd|;#=B|T-gROA?hr0hj2%iXo(Xy2vQo(V2p*a?Ffp1974tshLN zRz;(%+p~24sASs^)cGeDh1MmueGMMmQIzZzI6s<*IOUZ6mr+3bO7x#2s?tdE_g%Tn z@%LT%P-+iAyaI5aW2BL{%w_{%A#C%#*vRato4>Zd+5uP)Pt`N!!3q__s8DQ@XzWsBpjKd`Y&SrMj&(cQiXbQRAJV8z zl#7Z6UAJ3p^OM+Z52!B=0}1o_>vl?!ch-6~>c;Oq9$cF0f17PnVB$R4rMe{I5E@En z+J}T%f#I(?8(-Y%&W)m`YL(S;BdK^Y0k&fW!_B0iV?fLpH_}ZJ_2vY`3Hn}}mAD)- z#P2FBB=|YDU?+DuFs|OJR(fB6{;Vlv_1b&DbM7k!9~LN--qhkR(&EvObGp`-DIFkq z`;E|Xeb4(rUbxYqyS891HG43HgTnlQe3|n=O!LEYb zC%&XQzW2u$cwW@2MYyvGZrhtqivLC2Di@onN^RTXk05?k z`k$M+F8g@I-$p?9&uy{1QwW0u7}U+UpfZAKO?#n)I^U?wvMrzvc!{x2+9TgY7(61tj#g^nZ9HJZ(Kc65o&pb z99YPG!y!lsEx{DGg?;1p*v3yo6rywwx`l+*7YPRxF9-*#?45+o!Qe=b5{}zec7P^z z8SUFgg864}Tl>GpcG;tbO{ih>W#9Z2Uj2reL(ApO3dViAoWBbe9$Q8n8%nGx8xaah zTaS!er)U!%f{X!9ceU5TfEiUdcYSR7SqYl&-0 zTyGEb8*k31W+t&71RycGkxK0>XYr@LGIP6sBQ(=>3yNJvS3$M!cD>m5MqfAm&KC)A zMh{&cg1{^c(bpIzB7QN35DRl0%AT@M!AHhVn%7A>I)Y6oCW~1sdZ_z73H45x;hV>1HSLE;>6c~?I|69g0#%Q0mW3R64L0{-5zjYLcz z*vqAvJFj$Tw}zM#?((cyUtpd0(a{NkkOU)KI2+kB%cp~jm>a)6r(5>c_lo%&siE!9 z`hOKsG$m#<n4(cInK zVMp+H4+W4_V2h>#Hu|F$a{WsT)CGAAqU84AQ711OqD|(dQ<~OToy|M1>=1G-98Ttf zPyDtx-tAY#-%yaD4d;V)CQrg)Qx!}(Op5${y3}#H z#--~}DCXWX4Aru%y=0DVd|lk9g!(x8AqWLV^VcQ2FWc^U=Iwc=&2He=ZzCIwv_#8K zGtUWpwyJ#a&CJZEKXWTkVV=9q;W}ocQvoOWTxN2$rN_^h;I^0`_%xA;yEzVvDKHCj z`e1*Rr>3LnNO7SSnZelx+tRUou&ySk5x-XjAwb-(&_x)~f$AOtv2UX=7&gK%lg+g+ zPPcQ1_UV6GVY3QR2TqbEdt_9J7swOx-4d}2E?S>|n~`)~f5^oDWJ#Qhfoz_aNg8kI z((;9u12F94U-Wz;LffR|i?v`4IPQ<`uP@q_DE7FlFJO3Y1WWBxL}RG zQya0l1iA4ifPf^em>{MJse_ZpM9oa0=3NBpKCC2Gqi{ zQ|u$Q*-pa=iZM)BLb^4_m!u!Ne|_z1ci8ja7^MDrj?qGPA3sl 
zX)>9+4qZ>gs2mJy#MiS;Z6)<+D58{)0DDfB`d4chbO@PM1h{6d*KeJVWV>1N=?u<8#3|04PUdBo{x zfik?Gn3O7=fWb7k(pBIR(oZ-zm_xNGw z`)qv%jYQ-pE`4n6F^HtC#lST9xB_~(A^T3gL6N`%JA+mDh`-0Dy7r7tMV0z=>+b?P zC!?CFsM^pEQ$e)WCSp6}=q~42-D5+Y=hLY5XY)YEL^?;!$-YPxcIWskZ#mbDueM_`hT?T;P~JYiC_JrVU(MI|32=lIhVgVTex;Z*uFk~ z$p&_BAHSK8BgM$!RzX#gW`@Cw!t!8MZ|?=y2jxF1!)p@ldVh$-@b@z(aWa!02fz>6 z%+v81mMT_*YT`~mbR%__hHb88tzV3x_}rD*Y&dX>b?=_qe|_7B#5bxvajd^QooCrS zpTFvxx7@{|DB9yoQ~|oLPICZvVtUHT@4rYwv-deA~e26b!fHPnCyI`?!NBJz^Kdy_K-FR?QTmu*Sd~sbjIuJP+QA_olp;;Kk}bbDUsYeUWJb*nuq@ z0t1w+I9=!w8!pf_Dbv2-Sbxb(uE0bo%R@~f-7q_8Eo#9XjG5xx{=r_(J(-@~k(Y>V zpr5ftTAIvan5IyI4o+>iD|XoJaWE~&sF$!d_#B#IMTvFyC{?*U;B%V%{ z-?eP4Ch$ulk!ibH;CD3zi*Wq00kKzIn`?JKOV7J~8a`s$VqzwEl(E1S_nu`VjGv7H z);Bi`qlFYXwF#uJOF0rSlwxXESl6dJ9S3%={OZ?$U578b#p0UO>9ea%)D72Pn@b-_ z?Q?$bjgm9bd0!fz;|bsU#J<0ssh!{?{E7PvzQ5I+8pf>0=F|qBXfGmrV2_;!=H62{ z`@*apK8%lMo^oEkdDa($(K>UrKoxK1oe;tsfo}*8I9Pt5I&ZOpw5pi~S=5co6tATJ z1ZQXB$_@p_Y;RruT%U$YzN+b0rs(omud1b*U; zE#pH3zdz^9bUrk&uIP7Eq@B_c;`$DXhtfj1!oSy zXZ8`*dA|~Yxpc@T*c{d5Ag|PEqo^UnORWt>T|5l9A9|`=rEh>RGCa9($=!qI=t56U zbPtO}2y52~;dEL@mGozZf1CLAfdA@)){Ot>{TMV%G&D3?M-2e3slDX@&-L^K4(9*T z`E&E}@848|w&=1S^%4F3M0}b-yK_<}{G6P>?1g2Abr5qgZ1C0xMLD{F^Wc0?#1BUJ zIW(j_8ZI4%;OKbDx!*b+(5EAhmj;16tyWO){1#HiI{@j1Tzg#RZJB_%Xe@hOG}5PJ zYtRAa7WmEn#$Hr+bATSxgl2A=&1;JHto_5F>w`u9T^7AL`P_+|pIsv#`UYv0emYLA0a3yvm&Roi~y6Q%LCE<2c=|otd3|WMCQj&S-{gR?pmx zH4M4REOp);vdE)rY#iqVj7xpVG}%!%&XNFgS@lv#TUX#}y}_za)9?V3D zIFuhy8uDDsDGFsBLh2q9#jYnpF}3S-aZrpaX7wIoMiv-QLJgmSKe`B&dm8@|0S%GIGFw_hdo{kRGMfGQYIVsnU7w6y>m{*coGBZ5QS++p0S@AL)-6 zMc!nmPX`BGY(FREW-QBLisI|M2?WOWhd*y6y z&%NK;!lkwoGR$JNh*%QIJ5lsqY&eK(O<_SO`tW)w+HpC0 z+wPl?Df>U;Fh}@*4Hr`Ofjs>D%RqmCAjjCyq`%1f)8Ai8UYkBMqh~nZbTtghUjz7u zU*yqJh?8D|$FJ=YxP^Imc$8%&#AjY`l`6yJk4|uno&xAWVI}Jaq~VIO5H=G02kpe0 zB;${T4fNC6|Jc4hnE+eKtG73TS9firKVWEYt|s_VYDRWzQwoIm4T-*6U;s_S=_S`QC$*>_%GrhgmBP$dJB} zpqG^(m1TY;%S9pkoV8z?w^S_+%Yo#Lpf(nH?Y3G$B@x`k!9P?pNZ|MyAl%V}e)41T z#2$}5JZtNe@~^rbp4r39UrKsw=^|!#z53i1*KW6NLx^vV*#))_zDj%#9M^yN8c8Rl 
zQ>pRRK;FZZw0)ewX=&~ow?(dsV*L%FL(I&c^aOp~?+gq+{&X7+%Y@cZ+U23t>RAyJ zVWBhtba?(k5%6E3PYAWgxD`=N&tK$JRpnF}ozRoiUu#VSzE)+oZ1A#UNUX*Q!L@r% zdb#;ESmO1y2UM}5&pM=Y(Z7=|_^TOCq`p9WsOfiDoSYjQL+3{(zbU5&J?MKnK$);i zDpw>nNiXe|!iZP!?}A<5ZKmzz{e<0xUT9(Os^0f}8t+ssjqDJ8>tfzw;$9%_V2uyh z=DR|QakUdm*~FA@v|IoxfZDc74HEGdVT&5H;DCz$nV4F6|05({GjCU9{jLMpKBN`w zG+{0t8Y@7EO)9HR^FoKPAT33oGsZGchfrj*8?r-_yD}SW=%wpHrihiJiS?Bfs}TAs zVx^SvF=!;zHG>gX>`UIegouqI(|oQ{H{A>S@JqB7caElNPoo^pL%OsT3VG%QnJ)F5 zM1A%ccQliX{V}MO$e?ooQ`b9w9IYXdh0K988ktP!fVz-3!A+SDeGlJ}|i~ z;sWlE_1npoztBLU|6MAyDU@?D+j$q*A18>P@H4d-*b+{62&Q zWqGxB=hchTr;0#+i@;-UI!RVA4^C|f5iqI5Gs<;QAAT&B`wf6f7%UBraAGokG(@}a_Ti!|Zw zAtNH3v{lZC4xPVyBTIAaulmP)$31i`=iG{=)GB_+)A6wwVzt*LWFJ?YOwUxgEPwHD z4gl>m_(w3){FC4-Zfe9(z`WAuQWLRyX5@Qiu6UUwV*Q>!)=nuGd&BY*1)I7tua}SZ zggs5DME1Q`sOPC#4c+6&_FlI5Yb0B05FqYcneh9Yi25^yG~s^h3C=db&+vGMrJl*q z{FG|8+PG&q=c|8sKQ4jiH^J@9hsV5?8a;T9pTVDJ_Z5pr+l3vH%0@^gek8qo&m8>) ze%G_laU1sqnL3d6z=MQ7+{M-(%zqXw}6QK(}-sx=p%vwJQ>ftcCBv z z2mH@*+&=J!J}>7M?B*c3r^vwY=+Ex1iD$?~zw6-CRD1uQsFcR=zDSqi;?gW($w&m8 z@O3J|_-n!5S850=vh|OZF_2Dz!gMvxCxM?S$zv9vy!Zv)cX46o3xc4uLPq9AoO^zZ zS5ld0u{-jTbe;VGZG!5(qm>2eWsmd0#^n(s+xK3e;IvSB6}q@Daqs1eE_^M58O~{; z`69zD3R*;CW=M)#`Gb9mfWD|vSTVJ zkOP$(Sa=(yXnBf_-= z(|P}1Gu=qR+mRw3VxzS;o~)z!R!`Oi(C;eFR0%10+hYv?cS=RiZ)OoD)I4go;}>sjMJBQ{T~9Llx; zvAFhKjZIuFa1KNE^^KpJpqpN$d{Zu*NvtE<;_x3A1l#KTxZR@*mVroLO;DpX+EHYJ zjSR2zcqnZYx0PhYL(qkey-9H#idO|}%qg7j%eDDUze3wjR1I}@|5VycTD@*Bh{K4F z2tV`o&1irW9N%ZEtP)}ww=#X>#rCa&a}=a$`a|B^RPc)Yw7E;!_E246QBa{u%CvMBB}tK% zZnt4O2d)vkMDn9rq|LBsBfh62anx{w?W{5XPIF?0zclSI$vi6Vju%s76TZ+jkjG-7rblxA>rCqZy49`T6-0#uzOQXt>qcwiY?v;(qTKG9%v)Mzsy}{ z9a5n{gAHAVxQB}D^Y)HE2B8LcT-vMQDQRjv$f8e$a9xIBA;8gn(h``-ScAleQ86!F zknBj`dIKX3{0z17{@>li!vES$fE3}@=(GeElzTgO+564Dx<+5+h~4fJ!Y^O!#;SB> z?zubLkZ~iCDXFRL?&x5I6GfiW8(Z%1SMO=iPUUZh(n0dFIz-9`T+Q5<1EAW4yfElv zytr->xn{ z#L-CP@7Ck|P^fkv?T}@Gbxybsr@Prsi)#c2p;rs;r4)GqQJ#i|w|i`qi)y9K@}Qcs z083Z8!WOprJV{Q^>s5%#g$_Rxz)iKSkNhYSr7GfYD-!YfZucA?*zdcQ%QdqsyVNOh 
z^g@;Imgs)4=vCL>Wd9NcsxA7qO?`=Y{!;4daM>u?R8;yIqQ#u<98P#X9Z2oUsR|NK zNFz*8KI`u@Gxd=z<0$Os1&URO7LvCrKGu%rKzaF~l15AA1JG9$~GY;B!aCd@m zv|&--+Xr}-G9O9hN&{SjO*@*Skn)Wj-AV^)9b)tEaocpU07TW``{3AMAaD2E`-baj zWvOK+kz{6HS*XzwS;8X0M_o7u>e6OiPnkcQ@O&0wm!oGm7%D5CG)yltPF~sGq^*3n zWx3YA*14s?`B)B_(qf7Uj2sVSgBN1Ulhy6iZjaPGgLVM?Iz#J`GKqj|R<$EORWo(7 z_m!*+7uEW_IbW2s61bRSKWilx;TrDQySA4G(^4I&Clg!859=Hc(jn|W(VL6V!nSIF zStp6f&5Ew*9^0d0=|S6sB7X~)J=qm6wKFXWeq5N%D*|Wnik##QD|!U%{kyvjSqyW= zcji8+x8g_7vwXn)ymvvjhgeg!bBp+xt6XbkMRUv$0@G%as7B{c1&Z)w*AF3+jf zRK^l~ofWcZQC4)UoFB4eClqEX5i%u5UJ{|lM%Apv%GxmU{b11+yy#Zw#t%KwN<OVseKgYtN-FZu{*K$$;=>BJz{keaCg-B0v?&^V;J&kxS9}C99R+9m0PHp zh7e9d=mEbtd_ceaj}55i|Mz_ST&*AvoIkOfXY)MV%sDL>eEzW%?7Q~+_ev(}m9X`T zReRtnk+_91jS`$6B7~!1GNwKy<_^rHvI69CVzjan)CzngO{&a@V{2^fsk+8o`oVWe zHOT=N>kq=}a0UZLI!+g;Vm=aDwup+&#g5h9`15(T$6TDf8%JBu3S<4CB+@?LLJTNd z@vu5r6J|`(N_1O5fpBj~du{5k$Mx9d^}iUpVTxH^%9S6fQLnNMR9TY4ndezNTb06X zj*mnef{Aa(j20&5_PNi9rLUe(y`184F*-R&UcPL&6{+7zo{wt0+IzV-#-1s|_~>P& z4pd9`iQi}Hfv~Nl!U%j7f98r+|C~4~^4Lu3m*tuhMPAhPCm7I>1h3 z(A*xy+^}b0Cr+&;D-stjMc+|xd!e@ z-zD;#U~3Ymc$_o_z>A?2;_JqJVv$MlTgQSTfO|87kw+;4$JVvGpq=@Io0?wsWJw^2 zj;I4aaacV-AcTw1gS#Vzod9^TI73A5R#jw$B|1O`l_F*^2|N%*|Ae z-#_0Ji*jil{pl0Q>&jy~-k43{GQ+$9q1x+%k`$zR?cR7L7xO4B)=5vq*_n!@k#SgT zQD<|_v_0S%5(d0~zlQ^c3siK&4B+c2QPk|&NU|A?YikQ%!%Z}IaI8yt388`NXvC%B zwTG2Q(N&^<@`^x-`n9V|vrTYdDfQ2g?|I^7!(x)fk@_N`R)w1na`K=xl11yR=MZQ^ zMXhKf9%|q|@uExXj$`wV3mz?CRewld*J2Yg0T7%q^xR%YgHPqi8t-D%OO0fmMt6Sv zJSBrVcDA1Y?t0(4lqN6M$W=H!ccg!{G;V?!J-AH-qRIcRBVMoIUD@;0&irc8nG(nC zA3briuGkeJD0hsgB1Mbhc^0O^oj>No7^_3$9^qL6aLttGvGsfabMiXs7t~)@dCnf& z0i27)ANvSC?4Aj_kgojjpG?}Le*&6%c}6$@4*k$E!GK#c_azl(exGjM(R_#->G@Dq zciXIX3`DNV_DBK;8*cZNkmh1@ z3i7i2UW-8{X%whuO{OT6PbHAEv0G4lDAlK&Vd_p-tcU@Y^i4JmTXnhXd8dPiAnO^K zoYTT@KX4~}frmzSci^)3{7`ISBC~3y((mHX)$=HBJ+G4Rp#tiY0xDO1k}DONTk7#V zdptY4#q5!+Yi}55I^EVQ(GzcUO6Ydg2aV$_PTj?5n|$ zzj+YdQ&R~_xt6VVcjmBjYa$w8k;tQ}+>NBq6$y_z+tyx|T?wE!zvfc~%ju@eE)@VE*`do_pqOFV zNY@(H&0ZSD7go> 
z>g9rK(<;zCwpAN0k)kB&Y3fat;wHFZJ(M`G^d=vT1M36r&Q;p@U8JV0Lh)P5`PC9r zRfA;$5q*o|DsD^4rO>b)_Kgh0?F-fL^KhU|V{gaUke5k9TV8q6ijP?A)1d^Da5x!;Z=Wl;QKNhU0mnI<0*uZVczSNfn z;C`2(p8rS&??3!cb@}UxK7v_t@kCnnA_R0J72Cex`^YkS5teO zSXa?l97Qck&y|lm=id8m@nr=46H5HhY{th5LP-i@H+}GQ?2)%?flix91w$mUw}hgRzMf{vke1 zis59NwiTYI);s&=)Jn@$r#S_zOp}6w|MufH$ zUnmoA@P^#IR8xJO^SIGIbycMbt-YieW?7yIHReZlMu#=c z@6l_U0UsZ4GSz{LGHo(qMA3O$#x8G z&*p49oCw+HD69NyqB@1W(t$?t=dPUERh3C8dFK;cqw-&D%JXdKb+Z8~xH0OLQp zLF>P(%72yVs5l#@u-a7*5@eLWwT&&AOFr%nX|N@afZz38O(ufa}4c_pJ#TO zN<~^n6@4tU3(#Y)!X`#rbj6kJ6g;dR5~xDR_>eW|k^NZ!OCYeDLG|5=oSp$2&=t_r zy|Ckg$otSUA3KhK)7!Cc`}1=rue>JQd>x`kq4ij(Yu|>44&E~}d)d)f6dAQ=gl$T& z*si>y8B`L3ah`X-|7-7kopU}tU*Up#t$Y2FoHCL$*8gPJxYM-ho8Qu{K4994 z5?8D0-)b%fbmBl(gJV?oPWiTBQnTf`o?&&r5KB#$Lp`&A6!iL7Krcm?qQdC=z<>@z zt(C(Rv`~<4#)ZhM!pE6J>{(OaxkW1WT{j6I?&U+UIUYtW6V6H!FqAAOfaYxNtnKKc zV9iT|9%#FYeQ=SqKxS3j7Krkd%K(|V-?Ja3PS@(eZ7ldH%L&dRshNTXAb{IUwriT(N*L`0Z@@_y&Jv^_>M+X@`g} z*F<^HWmhLG7s-=3t{LJ4H&)@+h~MW#9F9$2t6ZLobtEJ=YE7kco0#W?n9M~=x5vIy z53-C!04PaPyWS@*-*yr1+fqHnfJ}ldsq?`Y*ZGI#DMJTPhAMD6XQ{-Tod$dX9vL zB17KZH|MfNI)%mu+?c->SOdHArkHkFO1guvj zB#ph^9*FtkR|%?qpUxMoz8~fN%L!-M)sTN;GUqDpzoOkLivRj_{?EpTuWP3*LYtg_ z5*@r9y&i#3TPfqR$O3oIzX?%xlgm-HmnrUZ{hO%yn?D*T^MXXJ$E(0%q98Gm@EE0g zoKI-SH$h~HgRCCyi^n(%e9z3$AZj7O`yP{%2#DICt(t0YiM?J)@wYU(G22RAxjjBw9!rN@quwt(-BAS%c0N|2>s?J( zKZ@NCKAlfqXDvg-8E?*C2s-=Y)&3;@p)mD4EAe@Z{ zsTQyFagOy5BogNXW`m~Q+I|Z9NghL^Rz%G6ACg$PHizGaKVuNw5&7_o%4gmaSvuve z9u%y8O#3(0uG*@*z+IX?DP=fb_C&U}9oc8pXFXJ(-25bg7tCLFLQ)jR$@32EBBe!|b< z;(fmRa&;S+f>|9oVmcX1AeRs1t+C!ya~VToY7%jfVk!58qh79r?)2qoMVQaJc;SM# z{smv`BICfi2(dai3}HXXRvnP`BOvAnpnP-qHP0G=$&777>j6Rn{!qqGlMWyy=W^;Z z8zDLc{h;FMeqU{lmmQOXJjZjYUvVXLpFJI<@ha8mXj>Jg_XNa8x)IH@1(_*3zoA_J z0wii;O7dv0>#C`ciUG;sRD^)M^;enwUixApPLYdX3!&Wkp9#x$ z`TwkgF)W^FVWHm>U)}dUp=Oww>0KTk=-Tr)dQkHO-fdJVXwEP9-p=9SP2tIt!y}!i z0TJ4LSE4e|TG zl_e%JT~$&0mblw%IE8!iri$E{Ok-B9$l3poq^j! 
zIV#^v-89pb_>{$+JW&AZ-p z%axX2U%EXJ-EYkwb>-aeK(2oy9%&wH^!LSCa9F<)5JJ^_q1{C2WtA54c{cFe?UUIprzb!HY5y=Z z{=nJcb;axKRG=Y656}rrHLb=f-k$9$TAb^JECB{ji34p3XCf1xf)>B9vH;buEfV1j zf{^8%fvV{!20@pH9)XlG%f7W6X6FDjWNcxLxM)O{Ii|FJeM>`obk+~$eU&vNe}W>O zB^eBi+2~|KI$s_KDXj2`RB<7zp2TckMC}}na!Exl17WL{cbUv%n~!V6IvUD$pZE-S zt3O@)+{0*iKWxNr$P@RF7%$~vjZ9=VqgBktT_1E@{VZmUJDMld`uSZj<5aORkb$qa z;cBxONuA1KQodQp1sD~7BF>tDkUAnl+_6|gTOxcz|~PMkZDk#OHH+k|tZtX{L+Mg59C zTDN*;F}g^V=+#;?fLtZHirujZX__FFl-;IRlw0rndCiwjxg0yZAz}1F?YgKDZy8N0 zvSOdts{Cu}0lgSU+*)>Vzg+2b_}f#T{=?^96XkZ**sGy@0Xt0OqJ0hjC6Gl&zuYfp{>aRu;&aK~SC78}Pc}mQ3BqcCMAgxCR<&^E=Geh6_(JRtINHbCd(S zl&@W1_-Y>MXB@GTaO07^&!1m6sSMQcnG93a2sg(ZyfY=b`tS{@wNYyZ*cj-1EL=G~ z9qF~F4v5w(efs(@AvSkDVJ=g6cO`zjMXe%2Hgl1Sv1!O+bCk^N+h-MH60Qp7G|S{3Urjeaa|!=N=yyh^_M5Ry`rU|2ce;76+T-59 z1Fbe@ZF6Vh+B#lEE5qj`YVDW}y7E(|#f4bOo5T<24@$%qp0P(&Ky@frJmq@%o?5c8 zJ*FT~m2`XZ8{AO=Iwis)6&@`=j0rcAIG~zknv;?uXtubxD8z1+98l?l z{PZDW)QmmqYPF#lPL8ZNTpYJQG9Z<%^qBn{c6;+Xy93BSnZZA`c42e1Z^r_3It9Og zu$?x0Wp}mT7RA?YU&v8&SY9r@s(f`P`yYbo|I*Bz{=bJUju(2XC@ZC|`8fD=oufm~ zZ(Uyh=^)ExKWTgMMOZl-Uv2w5s9rMm=0>c@?EUv)4-s1OA$#Gp3gcRdR}o%ZF$O)w zzrZ=b>6O%MHVW9QTvO@5)vB+-W#iL#$5?$5v5P*1q5UeDdq9!p+w|@xq`2X_cVCSm zCv@Y%=7AcP=W(5Mc5H9NKBZ`UxyfG3&GfUW@^%mE@g@MhV?hk|gHSye_DQFbmFbUB z4#$u7f<|X#G&ejUL}srS5^{sHbQ;$g7UDVI`3>?ONI&N?v%CWCD8W}qhYYogpz5jqY_FYn-RQXljs_Ja~o4Nf=Qd%I^s66-1w>CyA4T{qTDe5NdE*;#{MicvM}Xi|_|D*xC55rSaLzROD@BSe#c zY$rztcZ$6kwyA@w2RTruI%I%Y8Py%LEW2IY1xTt=syWXLlX;}d&xSBHUFZ7R*!WSR z--NX}>l~-w%+w6T1X311iFUqV8W5v5|3=xNw@anAepnt|RhYazUGD#0iZCk~(z;|4 z&K1}1gONQVq5v-LV#ak+wAfD<rvibfpZ-#8CO&#TXoow9vP30nY$UqROR0qI88-Mc)7lTC^i6a=zns-bc>=G<3S}c-Gqebm`>n^ZURS=lM+x445n^+lgb(2y_&KA}(-5|5W$D`GLm@_jo4FKFo5Vyo#RQ#&CzWEgZx#2}cxv%z+ zMDY=6!3234HWm3&#M)a(RUYkwCUxN(6>^@KL z{`SBf2bMlw3H6@i6Bm;KPv@lGo(2T9DE)J-l@)wS0z%(dW=c&4NN8z3dH01M+y2L- z|9d_}57wMyGf#x96q9iA@ypb1>GwcZ+EUk>^hp_9NV2fIYX&7!=#%m^z8HU--t$Mn zqE6pPeV&=|>@20W zqZV+@P`<^ixV><-maqVXf(ZQ_EZK@tj+ff5`9h6hK@jrQHK~0)Jh$>D;<}68ge2IB 
z76G<#gGUnVuIB4HrAd7|kZmIJc9dKBD4RZMyK(bUARXE5{!Y=^(G1VZEeK7wQ)6Xn zd`{q+!aCOCa`WITJdy%tv*4C|K;0v!b$JOo!`3o>sY{Du{0@ z$o$+Ug#(QDBM4LuWP$mA8l(HUv2dM z&9#@J%!DBJripi@->@QRYDVM}SRPYIB)r>^m;ckzhd<;=1$Qwy3@l)#rC5_%x_X<5 zFEUQk@`w4|)#@#f*TfTG86yKU%j$t2_s1USdnRypN7XgQv}`@>NuBn}kqc})?R>L` z&Q=fz&5kCg~}0i?vcOb6>vz zWnBIjZ5x?Xm3qA|At{S*f4*AG{`O{}vq30YV;K5S_}+_ZZiv$Cc-V-LVftIDDew0y z`*KXn0P@2=+Mw11=h+ozAQ!S@L)H(|lIHa)|Em*B*u(nK>Z|+hH>)$LaG2^YprmhJ z3+%kQJDz!ZG`gRTe&qiX^3TK026YPhe(JZsK{>nKnTK5+v)#usL={Dc-tF#qLrq%s zoM=gz1-sW>Y_S0)U!yh9LBjWuU3V-`DK@PoJJKZ4&V>cEt!E6lmjt|Kf##FiHw8(? zs+zu%s&@8g+?F@INF?$q9Tids)evhgp{talgN&)NFL9x$Q#$+}p{JW!%xI?Ec}t zb+9Yy#)%M19!{oMbQe~17Z5;XTY&J9)D}wHY;`vJc@K65L{encS-n&yDg$3$+xOpj z;-6d9!{Hh>cd6q$`*BA?Yf1En`{qf!6jKo|`K_JGBb16z<3h+frqEK1#epcbezkW* zy-S<~xFwb7v5T{__89n1YN^IG9qI?Jm%3S|-F^@n={57yMoP99V!Ey_tP-RGhX|d^ z_ct|SWv2|}Cc0h-JXLZl)v8@{e@9EMqeGNoQXYfU)HWpk%Vz zFN(==xnh=)>LFsxpCPy5!1uXdH&1|(gZ$fXNukD~d7`k|ee$eeNont)R zf4~DeY8{uoTIiKOYpf1rz@kT2E7C6k3JC}(1R^9y)lKAmGzZ&J+3ev?%5SI(p`?My|1UoR}QH%_<2Rm|-H9 z>4yn*Z^U~|W2gx&lZxj6b!6#Vy8hqmI8=kbOWwribKW*eNk;u07nw3ZcSUPg7u2fi z$|6aaUM1i^Uinb<(>*^S=T+|7a`j@|(^*^d8qHYCNAxMtuEWBX+uh~CyQMqu!hOL} zFOZE-O1sWUH|WP1_qAK1Ju`uY>5Mf8%ABgfLUxAZgtbqZz@8xPcx;*sqgr6E873tZ z5gidE1bM*wE@T)FrDpP6_v)>kXu`nwr%{oFF;1GS>&GXqVE_w!T=?MJw5@+stJt!h zq~X4i+!?j8VSn~MATufV>Sx{7+27|*S97QSOgjfwU3Y286B;$CR(ysRjB+D}Fam$A zYbTV|2m%PY#p1DEGM}P;co}^B_V()`Q`gVocuK9obl3DQzWHldS(%ADC$(qGmm@lP z1LtaQrrz2=j3*V5Iy_L{p*|6+R9ffoGH>g2F{2rQr+6nb6qg&M?#Yr!ia4#-)LwZH zHv#y?ix1R%#ya#oEE#`1FkRUwD31u3gwlx%x=D)q>$*+QKJ}0kaTND=0A28ZDfD|A z-lUU_G-f2SJ6DKfWr&0x)%hW`6!57gcCwU=yN1>dC?;JY1o&|pYY>SzzhJD6DUnRl zlV!46T8GUP0U5oB7lNOZJQ!EI{Y!rN%TSw+Z9#a*} z%Hz$G;)D88Fp5spwVyKe>OE!c5 zjG}Rp!FQoM^z6dTPZlflVcYkdnSv-^MkZ2;D@xPoUV`r#^)cCgRyO0HN#p0$wdZSJ zQP9oHZN%lzFapKXX3TPp!^+wuQ&V&6*1SOEIRLXkOYoc)&t23FMDLr=6Z|vIwn|U* z=zALUW&yC*!UTT6)h?Qo{^L;_*oyWfe0pO{lj z8<4R6!1-M&5M>igVT1bpl!onF_qTAcsmh*Yb{J~4`%uZ!;yVv`WGPE3h`)Ei+sv?? 
zYU)C8#X{WpEIl{%c{OT)Ma-98KBeB@9u7$Eg!kQGtPard8si)@|J(EO;yc}rsIj1# zdYx*-Xz`B6>+q#-zq7{zOZL7}iP*XOT?3ce)R4smjnQtMH83<4_FdI!X1q&}LTYx3rgIYkMsH zvZFma!#8mWTcKP(?cMgUcQ>fapEQo(fBWxo(35=oe5@h^%Nc)M24pQbR`hwPpYUq$ zogIdSZrOrpnJ@0H(DlZc1{Mm{-R^npLnNWeJh?De&@T*snq4R-WD|*%{wC(a#y=5h zS$(~rHLtWMV@21!TBU=o`Y!-@bkQ|> zNOk7p+t|3dPtjz>-)&V7RD^~vM)8ho3^oFPF55e^ulS&`(3au{ccSy$_A@o^8D^GS zQMRQVTTZ4yDb3g&dc$j@k3j#1wLjaKE|I~aw3%9d5|m{ZL4A7L5bW1~qvss)2fdmm z8SXP21m@)AEEWES0oT*hn}2X-JuHOf6s@cZa=m%Iaq zamTxn=bjI$k{MTxl%Xx*0i{fLJ{c#ZZ)QKqGE7{2m^GrTtX*bkzgl$<(yPc&f_c8W zB+1?M=I=j*W0O`-#~;jfO$VrV^9K6-I5@ZyN^IyZ8v5;4XkIp=eJ_sg+Tx1{pi=JuBb8=anl)XDq+iQ#QTt+1R zwWDCDJd=Ku@;P)vsG7Q6oXH~Rt=o&FuU1lwG?nSb1AETI5IKr&$^+g4_jcQd?MDt) zS1ID1xZp&5XdsGfGD?1w4l+?^DB4dXMw2DH+lzkI4kNaP7}Uu%N7CipeI)9lU^%Rk z%x}2=_aj=H>r1F04xRlu7mztcF->yH%ATMcQEi`ZO)$Vp&F@enqzm%=*ykRf>qbL( zQUG~xM>1T1IrSo{R>D+)k4gpT=vaNEmb>pkzZKg2n$oJ2gyYPE=kxjmHMdRG_Z~auu!$hF zWG@Uwn+zMNC{V2y+Z^M$%D)D4VR?QKSQ<%~-UI^wo@O7{=t{|}8l@VtzZ**Mj65&^dywU;6rA15D}3VI9g@SnUKp}!*Nb_29@_#Lqn#TvbGR8auOt(1lNb35m$l5w z#|KwfIFA0C{Fl@s3{5%%U8f&iJX({bR5BTXW0p29E~7Q?-ng2c&3v51u;gU1R=+>U z&dE?M6bd)z!0#NKFMC~NZi%h@CdTu5jFtzntc5HAtX;a@|ItKT8Fb#HndARfeDq4e za}Rhp6F_#-M2=0hVU7XQG6Axz+r*y>hNT-(&&e{`TQm$iWkf+ot1DJ}!QPR<&L|eb z`Q2sv8!xdv{fiybu^|PSU^G}3Mj+?!?npS*Ho)LS!3aX*y)#$2iynT6g2yB~Mf@_5 zZoZ$fyqsdhxas#+J`)J96K+$jA4G>JDjx+zqDO!cg72yrsdJm>y`C))wM!$mTxyyz zA(g)^@z2v;LE!Ry1XchqlFkmfC$R6`!+NVe-rT(P5-SqD3QPdK(idIr_xn3Y*@3U9 z&J)*3bMavUr9+X12XBnnUY9at@-K5WvQv>BP=JJ$1ri=>0H?zSounVIQ`tW!by>L=1OnHxfv# z#ooW%`e{ou3?<&s2MjOYAPNTMxkUe<-@Qt5yD5w0ve1u{oR$+fpv$F~ z;owF4y&fLfHP4AVxS~yNn~`_I2qGMJQUGJYU2EfkD-()ORE+MLil3}R;MpqT5dE

XA2O|nZq|$uxg(syl^Y<>sgR6H%xr2F!xSc^@ zv4b%TnBDI1S4HB}3;1qXx%|`ALxtLZ7J%Pd>}q$R0cQC0aw~0w28+~Q^xft&VYO<+ zQLKaR$F+kJRe9|hSo(X`T}~h{?a1itQI{^%o8-W#Vt274m!`7seOUYlyUT<9F>=Z7l|T8nm8jgIoG)W!s45em7iT zjti407RyCxkFgPV0N!Fpt?FJd8ay%P=(~3(BY`aR&k9SMLXX2m6=p!`=~ax_uRR>3 zUVDKYT8B-RA>iI_#|S75Pc5J)7}7W)5E-J)sPnI#{t#o5r+Fg?tQNBzqWr|j&B_9TK{elx_c9yJRC_8?b6=8B28QH z(%Rp1-ZFjHoJfc;3R)rv7`@2gt`N_z_P@`%%QZFpZMTQKbP->kpXm@Y5}AkpJnWyY z+_l-0bNO5#FlTay^y{1lw~uZ?h~Sj6v$2_-}$4$=)ig2h3WWUKvO0a(4 z(3hLK;_8_f_3Jh1V1f+P4RWF3cO|`?u=UDq5Gz;XvC%S+1AOz__+L745K3U@jkk>Z z6MtUY9SN>K2}d-XcZg0DSg_WGA~upq7ufm2stte9#h2Q&Lfs?ft6QQtwt)o?)2}kf z+!m$L8~DYxvHU+E(K_kM1Li})^+~?M+ZNHV9-krZTRVfZ^cqAYY_BHuuoORaEfX3k^Y=?g+}@oa3li5>@_N&xd{Z_W zmnI%qyptbK5?)M(I$(I0o_f3bP?6jId-*K-i-sc!zUr*pD0*{mKZ^G{`O_5IyB`^D zBA}``5RdUX9(kHKS?+-_3NNae;}c4R>1bh-RrO$HZ}lC?IR*)dKGY@m9aiTcc49ur zxAg{z;r-BN+;HY8otO{($EDNv`0mMz9;+Q4XotafMT6Na%Z&~Tv~-UBihy~0U*_JW zLA4E^F^3Y{xx3zSS6x zOXY}~PtQh2may6YRVj$S+k&+zEQLwqM?_8G=P(4$?dfwDgKoN95Rz(~;VtMDXhq%- zR@^WBj(l8%%d`|a9ofzo-fjSgjGPCUN5t)Ki)e)+(qDa;Bk~_D9R(JGT@d-5?TSVX z; z>2f;jpN(&ZGYaSknz7+Cx$b)hGZ3s?gwcfb5M%-NruPK~obGV=%NC^JDAB`h|IV}-10EIpc$Onva~JLVB&$Gy=$dY)jd+WGT86cL zLB3x2+dLfRfLVPls2PeVpNFBIxA~hj;RbkH^vic%lk&;@(9RdsRffPKTlJ_$TX9EH zumug(Y)eg*^zO$W`&1tZ`GY6$#ZfGVjlurx;D_Rns5il<-&!taa$+BRJeh9%41Um$ zlj7~-L=Q14_4JVPD}sHd65q+-g>>;Z6=YfwlY#hO#|k*ay%$l@FrneLb#Yh-h1Zjt zupZ0ELI>`9$u@UnyiUwYm4G6+4%M&MT6$kJ zl)oE{e_yv#|6Y3k!oS}jja9f$3JZ?m(%a{|5X4#`!RX$beCJ%qb z+Z3YZiU0VKb1pY+EbgdZ3^|rx2jYrPxp|A{GhF8QHW`{0DF7vZPg8KZ@>neY2RrMJN+@w3} zWO!~$17Ga^^q;r&&+x--mGOC+%t2j9I5sO9_)$hT4myI-+reA!~w<((#rK=Suf_u_p=g8 zY(XIZ;Dc;&wsi2HWzc``&_+@9ao{d(tehgkb1`#63*DnESd`HG0k8Zpp`}yL+ptK$ zC=PLLAEn|7&_ZZ%9X@S<_c=m8T<-PdBB0zg8{@7~SqJh*xVE%&{*$7$mG#C;eC|wd zFygdc&E_^!hFK2?#atY42h4{pX;BVrdc+^R@mSg#H=FoI7~s~rL51b>be;GV8ft8R zH(E+LFK^h2dh%};JQ@gW*r8TfRWp~)=!|zVSlbNL)bb1ey7f2{r{`W$sSca`VC;96 zd=YH5Rl^e;`E;6+VWxAnlWLjpbkPZp@$J~NXjzSV>SJhLhIT9!@Yak>kk~v0NByNm 
zL-l~=I)sA@@`2Md!;0VV3Qtw>7#n?Dgpe^@xfUm46LS|38i3?IZK$2{6P+3bD|Em2 z86*ow(2?7ZBoNqmR76h4l4?Gyw^>fFLG|WU)l%(o#e!4qFYaG>Q*AG98+}I`i?iq0 zciCW@G)POq#2tc)5d>(_O}EcDmaVYY>?uUf)y9tv_JSvY6^ZE+69kC@2kOC%HOB8S z-pS@%33|0Mxq^D{9>f(V`-Kk=K~w%1asJOfoPc=q z3hfPc<`LqddCVsaBHX@C!Wt$fruMlSx`cM&@CH{8Ynt8-Qi4saeqX6BlEkXnD0?XG zv%-m{Z+>*Y3e+!-H$y zEj9&f44TtpYqELLtCKG?d{DkCmvchaZlQYz5?HAP(e-Xw=!-oT?ovZOHG$_XrKka) zvTT;rC$)8cqK}#FMJA)-W6;rb5(HM3f&0t04>mqqCAf`aDTQ$6Ai}=T%v)7&i^^AH z`u=k5>yzfgJh#vazpu>;;vNTO7U%GEBWY>P_0MK^bz0Bs z7R8ED19<1T;}7J$ZM`CJT@Oeuf94`89F{1w+&|{voV$)R%X%=m0Bh_U+QpjE-vPZB z6msO_^A3=}ai>l>qbSe2N9P^jX~~cAa6R}6K$iHQ?t`zr#OL=&^{rpcR{XK=8WzjV z#|$9k2C%%d`>}Ai0V<3r;Jw_+be|L&qAy2zTkY)87{7ozlmqXeB6R-B(pd~*O_~pR})z$mj(Ifw%}2f zXt`*fcS55~z-p?d<0)OPA_`5-L{il!2@tkQs1c{e5V`RKzZ_T`Lc zu$51n-@K^)*R8j4<`a9=&3b#l>+Uz?OS}eT=I~vwZp(!StxyX+%Kcenk8#>K(R1ZB zJp1ctx-_}Ch&I>|TqhQ{am$sG^1h})bYx_D)F1s=TU#^FltI}%$oOS*YFVVDQFleC zw;+H`+tZw?<4w1_JNM>oX?GJE5`9cEzsTQ0&SM8}ljOr4MiF zWTb;?CD^ikU;QqGy8mFVHop28V$cj#VhLOI!m@ot(ffYFeQRU5Pqr;;)sRvCiSu77 z3m?Uci-H_%mx8l`?%9{+2oxCH{6te8S+WoOKTqy_5*Nc_7l{h%qP`6mJ^xg^N`S~6 zO5|5 zg8R>MXnVpV->No2%uZ<9UzlxZkO8cna8hQLGli}T5!!(@zK9bA1X9oA0#r}Q3!~rDN{VmE5~FE4jPYmxrGA7i z57E|m2~{oE(jKswg!+DZ8npPae58@MFdd-ngm`qqKmYw-xVch29DtomT?&14-p$`i zbUC8FDf`%<2p`p%jQk;8)&2OAaD-BMFdKM~^LDk+D5d9!+x>P0aJVndhUuosZ4S#~ z!~-fGgbWCqt;6~8Ut1>*`-X$c?PT8}?f&w0m$|-}7dF^cdGzy(%h5e|rT}(K80Zsx zjscku(u#EQpkLA$tR>8MxbPTs-PeCjAiKwo*}U(6Cxu)j2r%1f?a28Lto`n|e$;Tr z5&hiMYIebQ&txzRTx+>!Y8FwORr1PA^2l==A7ptqhoDA6j?tMytGJ>>F&XQnQoAa3 z1)UU>fRm^fjDAiQDhi zhH2LKeqD8;x%vKJx%=Nmj9hHTS6NrT_m*-NnxeIDJfAT90lqn(P9)MS7a`CK7Bjqo ziiyPcuMn&D1w-;X(;fRPg9m1=Jy2_BA?xY)X@CNxG8Z-4mJ&P=&sC--Zaec!uyxBfvlc>Od0mIxpz z+L*+~HGs#O{VsWiRki5yA3hHk0{x4lw#68-eaZ*s`9d3VRmAFp(I;znM zquPcTTpv&Hjqq36eEdMf`cF8`?44DB1FEg>!LzlgK`ip{9N{smc*-Beoc~@5*#ruxA)_sP9YSypq126{zml| z`Y+>-TpERXBAN*ala+D?N#pvzYCDp|Dt#rLl*}P6mjmREWyei>UsAh-BfuUOrMo}! 
z>Ldp8BBGV(no<3fJ4@y^70WXHzexGDbf<_xW{Tm97Gj&bq)c`Vg z+mZ`f@YP!&XXx^yQWw-+AGTGJ?q%&ooqdeaRw^W}reiG85c#JIP3i$3Yn+hpSg5=% zHLnVBObRjb({9KsTQp2~Z)Ku+AX87^7x>NKz|3)FkHC0kGV}NYk)J_yiz@oKIb|h8lk`gQX-5% z!j<9i)Uv?-Wx9g8)Q>Bt-rpdB4@&+zzX=zpeK<9|=}cI@RYlKMFD2?w@HXmWRwSZm7DOGa;Og(|$Kxkaz~G7f3uv z7{le#%Sd__}8_zw8nkiunnTd8ye~zLh&h6T>&Y`ZuUZz`;1t`9&xM6Q=ZYke3Z8w;F8_x}R z=7mD+$Z^~RjOVJ*!JUIKF2}oE6sd6_{tf$t^+}Vg40+o&XstQgh8;nDG8*xS&v2sa zdI$U<>xsBS9%sj#sDS)inwd%8M|eU&L0I92{i9<;Z?bjYKT?c)sc?rTcO(X!v5pjY0n?{_+Trxk|nd#oa zyzccjC8iQ4gXxi+_BI`=qffBIEv{u9fzMu2aV8=O-R6v}tcIG}Qvx#1eVRI8{%%{e zzZB@!eXmRWK=;#o~KaXxpgaoZ#Nhm z__&|r-iwm=t8vt=1{>l9SFu5bK$x{vV>4{BZVd6zslt?tE_WRYKAASGB!4S5{tLxVME^y)%|1j&xEm8CB~9^sujl*To;{L*?18WapiZ4? zWW+{l$?9)JHu2t9d?p6KA2KITeHB-JRMb;H%<_U>>eo@Nrzs$=UJF8IXmm{B*HPj; zld@bPp`PSu#SOgELQ*ztZk%FOl0Ty?IwoN+mLr>Mpmz3{1(SqR4s_Jjym6jpeO3W6 zc_M@{O||(5fqv&>c|RKn{wbbn_%7C@Pr7n=KPk7la{mUFcjTHbl zQG{oKmanoNX2P*BKZ!oZ(A>E4_JAS8&HyW;s0qUd_xRKY{y<=Yd;ifKEeuePxySe> z?ou965(`aG8()!)XchjY=hwXq@Xu|^yw21v{G!ID%a^)3DRW$wKbF57Yyo=_1UljS zcp?~-o|=j&BZmc75v?QsF6az$lya*wD*A2Ngfu#jw>nZ2ii|av^FYo-$52 zx+}5cXv(Lh=uh=R-!=Etq~3P)@uVSJzwRRI|7WbeS(mL9epnjqO3mXpyx|~oY5tayBG<@| z0ofGJv9`KmdOBfpmtt^#TZ1?VH}eKfnrFrh=OU<9$A9lEqWFvVSnwMj|26Q9W7@%6D^r6QTzQ-y^37nQ?`%cG(1V7SqD;9s21X*2&^0pU4g%Y#vec*vz=h=ZKEBZ%z0DLHveWBdYiF+(L0goH@vvo^p8FqT}^5dR&Mm^yAh<`GLZNiU>XYG**FARDy*z zV4yWTNcFJ#Pe@4)D_TO^K=&)5GD*G~c5TkPPQSqx3ctm4WOR>VkAdjEgM`Lca*TvK zKeO<^4NOq0+>Wc5Goh|)Qjbi&Dn_vcRe!jM$&Wa0|I$uq+Fzdr*O!b6oNLM;nB(c1 zv=G}8`r&v)<=yng6Q)WK?WUd}4`rRn@2wgkbNPZyMcbG09H`44CkdR6d8)+|is+4` zE28!O#O0Ib{ee@eKVTX*a9rOAib);ETn$5*0pABr9De z(ft2I)>+0?wRLTu+;n%BG}0xFluE}2MA&q9clRbeDj*;!-AD;+I+R9)O?OGhCM4hG zxzF>Q`}e%-1AK6?OOWlWxcewyoWZ=?G&(%abH{+Dhb zcL1~87@& zUpw`uFyd9CNbJ=qq*~YaNx0QFjB%W zXxGFDLVW&5MtttfJGO`^Q_{G@pAL=k5ll7Tu&gdn@$ zaaw15O}1l=3{eLP(4K)j2ewU-(YJ)gtWH5}gtVfT)VMVY%W*BrzIW^79b%7Hj}N9H z$uTi8C##VJ>#h6My|J=)<`AEGz@RG*bR)5TF7H3e1Ac*-z+;J@!3T_*&#J>^X~NRO zw}_zTewB^6@8-f_Aj)D+gNq*>JQ+a<4uZtlrRn}@7@7xF*d_I|R()1JaOY*hc*v>A 
zi@uE)>_P`>0fZ#*kQVfb`rFWNWOWU6$R^YIUG9}B`PJwjuo~$1o8&6$LIEozhbla5 zPWcfNMK?FmCwNz8{(EqtgT;Ia8M1kwET!&nyN24YmIh6T2BR_0@qQgw;;kHzSuEc? zJlqN9=OGV<1_+b(w|(a5VA!gqAUnnXg}XA-YI-^%Fhc%=!{7V87rM}QN^*=TJ6t@A zBAvch&tkU?qvc(tjO0r~>6g0EL5;Css6!qHMj^CWvXyz8bF4d}dsuukbDa~ASkU?U zCA5}ZZSANAiMe-G39rfagGiCJDTeh0QZq!EX@!UO4r8~;k8QUo7pv)~2HKzd_Trrh4mgagp^L$NxjH&>vLqtOA4#$sf8oKTtMwfMDj}wIWgBDd9k0$lxG5znEBp-RxkV7`Y%Vo`1A`YtEc1e+CASlA!)^Js30r z2%`b~X)n#K+au;I$hj?halNv1Sq!u2MT&6)RcZx|$a&qPde2_e9o~NVv{V+e)+b16 z4j!raNY)ZfW|TuZWrh06Xr!dy!#vc1X!4r2!HKRzk?0{za~2RZkqLMyE*Xi*cgSY? zemlm!Ww8PWiLUa2Z>Uq@OF%JST~n7>5ze_Y>lNjMVp<;ZaMaNnp7g#Q~V&On`ktuoeAnu>Z!zs*P(oF(%p*eUlx!~TV2TB0kte+ zyNN0bxXR#H64i=1)@Hy%H1z?-TBvWr0z<5+FTeOj`TfLF@WkR^9`~SMd3;$p0>Fg3 za8&cg!1l?s7gyj^Gp9`RUz7y zS_#SIfGTKz8Bx$Jjp1)KrTWA5WkKp8h6MWt-hH+-Oxju6SW+L|xZSzdx)g*^F6)@^$OwxIh zRtSGLWR0;KBC8}DS7uK=l8n(n;&bV}#xDCHBXfGp&|NLoj^BB{&jEaq;>4zYfs(@{5Wq*>Ck%>2UnMj1Z?)~wD zSm(QNqEDEW&`Ifer2!w!haSX7uIQ9F&+R>6W{|vTH81(C#|42~BVYJ(>Uxixc%0#m zcA{b==54Fd)qcln5@iY+Ki_iTu!4VxK_4fJ!NHf(R(>J`)En!k>N8;#s3E0V!m$f_ z><4?GTbKoI_Xfdm{qKRMJe1`|3SZJu*-N1zJg1KxHMt#AkjI{Y-6zxV1`2M*(e>yT!SPJiYPHW%|t;k(v_|} z`1de)t@iXVz#+SD#`n8Od$;Z?Z#-LwV7ghAuG)F;bU#k;f6Qz^b|j!)?OA*FR2!C@ zmP^gZuMcT5o~}we)TEzYJ>Jwj4!IrSn6}`fqN47DVZ0c|P)q5Ka0^Pc+2LaU>g6Ik z5PEBQoDcRoG(sttVbqPXBAoF@i0qvU*_SoKw87X~DS^bBP5zj&axs!YW^nfyOwr!ts2PEcU< z)Y767+mXL1<63~h+R^y%l}!tG$Q=!8$y?w=z53hXBthC|@ii`>C=tERG_xc;Ftf27 zIT#D!A#%)bd&!%EOZO-v9sc23YViQQg+$ejWK&%?3iYcw5|C(5g0DeGJo`|)A)58c zZaPj1&pINNn`(d)(~Kut>!)oPWX(`dMM@cHHFQJD_qK*0GQQKLdlzp~4Wb?MFjj7l zd`~pnRua6FDBYC1eRQ07UKa}L0tbHX1(#%M0Vp3l!G z4s6{>CYRz4JyKYoOSH7HNwXwJS%=AMlj-V;JTQnQBmgUv1au(6?T>{pa!WU>{*a=Y znhx?Tybzu>lDlHsJ!Rpcoscr7bNb}C^Oj{XXM?TO$nYRNk3Za4!nEA1Y3yU3=+hF< zojdcGc40-7jfixC9l;UPPM;VnZDE3v^qhK%GXiG0i$@b#O0?QoK-t=h7t^KE@EQWA6Jq28 zpPl_g>wuMsAd29AU7{%MDlp|`J|#&n=&!n6W|dPkn1(sYWx!4J?dsHb7;Z3(I()_Q zq;#;aEhntTZ#xQ}UsLOM^bK^vI^l?Hdg|X*i|z-$RBjpGetcO68Len5t*NZ&-VeHV9UHU#+jqhxaWF)BXkV;*yn74(2b8hr0owI=uXT)!0GsPc{qj 
zukj3F<#)KjAd4?GQD1zW&s^bKRK$?`KS!G`aJ2myPz!XyC-JhUha>BstrcH`s2gH| zA~EQ@K6=gI;d41{Egf)WRF9o<)IG9r|D7V^laR#y(i@na2DnyGI^yOhCRZ5;TLKl2 z4k=1Fq$1O|6*~RoZMc?l~d97>wdnT zqIAeSO6%q@ZyPROz+}S7^3^>_$M3^OgA{-g>@}uCNiv(ZM{i3T{V({E_ zGeFFRuOKL&^gnQKAq7f7)YL}JO7+N(p~%%ItS9?!;sVk_DIWfm$qyl*3N&4en;QW^ zLFnAvGoLNl)-goUWxiS5qY6IqsupA1*RTG#3Bf-eFn4t%sqc~) z`F4I2SCXB}qqI5Ih`WABpOSMhdC$q?ByW5xlE-BY^t)CCz>m~agYWHwQpcC5k>C=fw%pEH4MnE3<{>8 z&KwBI9X-i$KmBR5Tt-(-8-|36q?f{$u_?}t&ljva>gg@S$DF|nyBLzW(oGR&n$jL0 z)Ya(&QNe&W&tUYPa-vgu)5Apg!i;a^l%(utQChJg+I$kM-Ph`9WR?RozO3A1%L@I^ zs$%_LJ10Uhi_7~>*BYLy-1+6g-z_!nr%!`#mZ4*;9P?0_lW+q5?Ax>S&LFAXC4P>q z1arP{6H5q@>`6a?T{=!7G=epSqGD1!vrKp?@S$gx0Gu@7fV?ARrVZkMxzBdTCQi8TfC;K zC^#OS6ih3hzVO}WtYjz8uT4*spX-@&Q`*kciHfWh;vXCS^wwWM7Twm)sG-Ue?8@KK z^|9k95i20tenM0PuE-;nti~1Lmnc`Gq842?WRQJpg5lC_D|63qJ=}RMBz0duY$OLi z)6Kon`?IVDFdi-xu1_^x6;jq!Dh-vIB%<@n{Bg&5E&W`miasiBvfHLsCo<`CtQNls z8C#Hj6tni~olBL@ZT4N&!lC|)li&jvgExhIlZ)Sb`s&4t#znkNw=QSBiyty}UGzJkn zx^C^E^CEkRPu)N2{gnG9Bb>U0>O)L1P)$&}l&uetB6X?t+(Sa>gW2?e0bi};dg}XN zVRsoW;S$yCwybn*Qq+s2^4PNSjGa+RY^XKkgzAo?iE0~`1&I{fgyh7+xBwpQwA>C; z(-)9sVfU)tV-TLABoH$wprLg`TnaAcoc*3~yEOlNMJZfodVBA|5J;(znYBS>gY$Y< zS{f7K#axr%Yby|cqh-`C3AK>H33a1|y=ApCLgs8HcsI$IhE0V?u#RnfJ$4}C;b+iw zic7v(G;X8BrzA}~xlM(wOU{Q18QO9N--&*O(T_o|@^FYEV8|HHL(ammf^YExv92@0 z9cVy)nBH>LQBf-6LkC(=G}SfnwX?h_>h)hhsZnXUuv)F`21jcLViSs@2r9OTHmrkr z3`-%oV~K+zY{0W7L;Zdf309Xk?;%wxFrgkL71O()jHCIulnZF=hnKmhUzq=+|7n>1 z6KraXe6AUTb?I^8lI8v52SrM{*4{N?&e?l{{KAHxHuqmo^}TrIC-o7NDIxFI77aRY zz%lJryC9lfz|vZM;N6x7EnC@ouCL9X|10APqmH>fQ_Zqbr+@GIiNRTQ)N{)78zOD* zD9#7DbH<|b3_Orla`ftz-zr%y%z^>ACdE@qq?!33MrGvIn=bb*PTTn|{_*(SuCh`^ zq(NvegKaDF8?q@Rt zcEk7ms$XFVZlH2EG)FfO;7HO0uJ^{1l8&3#XQCBaQx$9o6NMl*{EIr`f>si-$CJ>L z{Lp)fyZ+oqP8D85x%8m#tm&)2$3tZvw$)t+8sWi2&CC7-$($ikg6MrXtWQ*}t+>dn zD!{nO>UrYNB7YzvPMBjHJSnqFnVud6^5hN@gq)Eq?=U=m=N>KD7iN_|2EG2>9*Zv% z!+X&|E*!+~CKM?U(N6E1a!pScsVNES%(!WUp!Ef5?VHCU=sE5``Hi5pq*Za5NUk;p z?bb!^N_bG3YcePo;*&-MzkmjNpM^OIw|pd8lbfz34VC-QT=#-BxdS;_lmCEpg^8gP 
zgJnIq3*6kGssm}Enbm>LMKw`{nc_3xGI}vxzRjBFjM1J2I1I=}aBKSCU%tF8eq|w{l38;9 zhV2^wTf#pvbJ}cZ0bPW<{4_z;3OoX=Ul={IiVda>(q5EUC4R2TcZOUxTKIlMhxmC2-&w47$*LW$vo*5|zWa9LE9IFNNk{*+isPAR(oQ#;3uir{-y@dCaE z!C=YA1mo@nb+UHyRqL8Op@z8d>uf)QRzfU^XgW*`&@A}@p1{GOCB`?swf#fu=DZqP zp%aljzOCO5+l(&yjeM^yQymRGFET~9pLPxF-^?moDRf@Hk$>H;OwWf$Z;QN4zI&mC zh*~2*xcEv)etZ$Pl4K_bSW9F4CQ27gFxnwcn5`J!l;DX!cHb2QYNoX@RVLEgt2lbycio!8nk z9itl;bK(P-5?Ad#?eH{Tvz?qQ_fDhdutQPJ_pH0pZ!4k8u)wP(beL81dW4F0DtUG5 zlHpHjL!um#TMzMZY3$ksht(SJJ)rlajBy zhpsVuK1OeZ9mQwAnpuTTQ=M_S7nU33ch9XSxUr|_vr*(nv9Fzg*KkL>$jFmN{uF0oF`kx*p5ri=btk>FzAqTI9T!s=3GX_lR2ngYb_g-0`m_%*zH> zjded9qw^aYbDSi~6;3bPoo%DxTdJ_I*S%$*zTmh#CJruQ*q4-nCKuo-@|Dd7HX3%c zp%%@El9NhfOq_fklw6TNW|Z(TO)4>lQSANowUQjbTaEilZ57SJWW(m&7o@b)@~#wb z+B$C~0n6>>{nbGCY<3Tt|B{Q3j^Atd!k6Fx7djk#^owq0>J42x+>|^b>_V;?bP&Kmf)y^9+GwltFKr<)0eF2c!?UoN&J_AXjmTq^9Usi#{4FAvr_f-iAQx1k-= z(`0qVm9ppujqUY6y0ip_$=cI;<gz@hC5zgJel2=hqa#cluygSvcDpf&La^QDsR`^l;f%>@IOCC?*$gFkhdUd<6BFUx` z9u*URol|OAxcNbQSjF3W-EPmtZkOTLk{tgzN5|CuJx3{0Mu)~mClKvkbniaFEe3U$qq8JC5==Db!cA@q_oqvC z(05^4iy%b(VE;wHmlntlv0m;FF(T(hP4MA9;EH9OeJsp)x;${ztA&5pqp&Qn zuArr^)L)B%zTd%~a(EIg1{M+}Ocx@iBy5*KoNHjGRV0V`#X^QPtw@(@WMoXJt4Mxo zRz0HK3iZIKIJ8Mxr))LX=Xkp#_#wfxd;N@fFG)gQZPpQd_2W*<9dk-(%Fu}Pm`120 z93Z!-e*4@j`fXp7sRkz`rk&SM>ix4yO0%pl@t?B4?~`BJxmzPN(xXM9`}J;|C^}C3rK%-_S2_n45ZNFl_I_gduC#CXTc0XsEJ;R1jhRl6CQ9VF zaeM{wVi6{2GRM;g1D=N!=;_@etzEiIm<^3#SVjm19jwU4!aI-D{T^H&TyVCHI?jf; zPsg-H~I>4=gub+h_gUq~dsU^6!yiPaTY<6tg} zbIWM9oR!p5Y?Vs=&Zt^sY~=^W6EUm)rEBWP8dx6Y!R>2Fe_VIa)67NALMC zsvF$|HTm2=y`y{oACHeM+rMqngn@UTX1{EZ3SbuDAA}5j-+>mucZ z4RlxHSy}<`kiPVhAxLITg7>0IA;DAAd)|W(_%uzrv^D!>xyZ69?DJ}xbeqw>=;abp zzQOTaib~7Shxyr?xl2DZ^#!?C?$vuwFAWpZEv}X$_kqH9dYat0ivQcvKK-<3%L0kJLqq+n06LJ zDju^R42p(4IB`ClYngmu(=SLzYg@+i`LXHwbSA}%OKhoDbv{%f`>B_5Z~K4DFohUD z_qK|{@H$I4J(HmsFsEtyYHjUb5G7M9a@p%&CfDf%AxM8fr$+qnBkt-Jl+c@!&}7Zv zZO@j&vA|W*D|Vh6+LvfV>NnBBQc1uvuwr=bp8M>An*4lCQBFL{O^}C{Xt*b4gUP&3 z!-<4wK}FZR&x5lM$KQu1-8zm|L;183wz4S$TmMia_~#@-5AZz{q{x1hFjz6=&P`uD 
zmWr}Gik+yCPK4YOA?g^EIv*%OQa#IqtAsu``0Kk}k$x=|s06;)zDo&_FoM;|BLER{ z6+Gb4)^@zL)vhrjJMSq=PvWB9e>RxBlrrK*ozpUuTlTc-c&g2<<{R9&idF;5HMtbY2^YKlJh{+L$H#FuoP1uI{zLiE5Dha zl`46oi|g}1`d#7n?eX15yK~*i2XcXXEaKP@u1im$I}3(}r2Qj?rUUVlVBAG_7GvMZ z9fofIRSdV4bIWUb!b65@EXk9Jkh-J6#Uro7mMLHOHao|4Wqdwk0e+JLmKiWPtB3+V{Hk3z>W<=A$z48nx4map#&$olM(nqpQ zDomb2lcnJeNC#M>TJw7qAEq5Eq@+JV|C=$9*YY=GLNT{mY3E6|Hc1pQ|CZ>?@A+!* z!iIB9txfbnvSY36D>vN(_5d6O?Tywr=j67IlJatAf&k5V_{JY3MR({%`$)$@JMyP& zczc-64xYtIO;^qs(Sz~9HHS~bjZ4Im2|QuH#cR*@bK||LySj>olWS~8R`nXzt(gfy zoqEM+vFCgQAcdkso5~Gfd(*f(F0H?c@$%A{aTi^{YGf_h*vzQ-$8R0WvKuZS#ExBRoSz#5$*F5i+s_L*!=| zge12w&=js`F=W04x2s^_Qb3};%4vO5&`BQ8XEr;lTbYA-(r!VMXaiYc>?u+P@CFCdH<6b5ri7~K`E;5 zJy*b|c%{2_Ccci7>D*Q4&`0F<&7ztvK&WXTpJ0`}DAVl6ft;Y+b^3NpYB-qX*Ma;5BqGNem7)JJq zTdM&KJFw=n6~6)~A|v{vsweO29imN09akgTIvG?UeA~guie3yT^n9-#kn{_*E0vfH z@uIcxKtvamsO9QvYUbwd{t9HrMxJ=Xs>7xF|38&pmfNxa1VJbq&I?b6=@-e)0dX=d0&bH{2?X(dx2} zfUu;egG>t@fWQDS>xX{<_%*jbjdc=0h<}|L7xh-Pk6$pOXWbBM^rJxKs0Mb5rE>u7 zXqk3h5aqJlcS_-;bqWq2d9|+*V%o9X^_V5KS%Q9r32v-utHm!c%uDRf&u^EmuN<$| zAKQby2`3NU(y{MO6!X;1nb*}fc;TtzZ?wECt6=Fdc4Sx+bXoqW^ZuqXi)lYT z2lY%p=WUboyh-mn#9GU~x#Wz!A9p>&?e~cU2*M)9Q}*%q_!=cYV-4_UpcRdyg$tF( z7ewI~Y<5{P}(7!@DSwY}|-kxkqG zq9{s{Cn8K!q3d{i$V3o)O1z2p8(<8%$)`6)$EXosI&7+Ve9UvP=a2CF2DAyf}W8>HWIk+WD!% zA1lPD7RX`)nQ4o!+xkQ^!Xtj|hUdSvDW*{H`nFHa?MnOP;6sZ=$TMl=>00z$EadU- z{k-AED6qF$vl-aBR*@)OKev~T?<9{F#5(S!TNY8Zz!Vmzm_2V9e|&s0 zWPQpK^f^jk&4VdHQ5Z=!+_))Wv^E`vh1hdBm$ILDMqzLcx`!zCB0ni)BK_e$q8D2$ zC=#))M^@33R3})RK$ZBpFoVOYRzf;v=mhuYPhtg#AozlK5mq7;P8PL7#M zPS{3Ku#XtvIO!TIL@{$?o_SdFf|tuNb&6aJMUZPy04&mQZHuUrz%c=IpLL$_+Y+X4 zegOyaTs>J>$SjfXzC3T8{X zts*IC0!A;&#L+5_rxW`gEqUB7YQN}(gJpQI2f@%6ab_MK*cHI7#0IxTxy!djZ~N5} zA^2A&xJ`MBHyEFp8n^rSqPt=r*9UL4Lcu@;G$b6nBe~oie0%8|8fVspmph1_QajGQ z9nB(xSC@zJ7Af394AmIa4q>-F0Vsv20G_Osh;ZSI(P_OsTLZ(ZIYN&k3=MpytnjU` z=No%r2K|_&bwR}TCMMueIKk9skFAq3>M~}HZ8gkERDAn%W_5UGO@jut8g@tSTtnxD9R*% z?-mCdOV6mDGZ8)KrBQiO%tXyT;n)Ki+9b>E)$4ZbF)8oEiz<5#uUyCDIJq&2>AyI+ 
z`BePti-2aA7$M{AzD35c8yw`PzyxQT;66eR_ngU_lICrmu5Xnm~#iTELT_g}w>s{}lf)7(se3|tnWp5+*;uOx!r;B=g+NU9OCFSsrxD%c!-z2B5j49nC00Mw|JL`UL|1Dw&m^!kr{5H!rH zE)Wn%H1fyHKcEF*#jk?m&;3I;o++ar=idJYl!=|{!9(9Ag4l%wIdSId*GRyupSN)- zbV=xM)mZyQ5?FFE&BZAco{{`Prxt0S-X*8=i@fjKHA7JVI#7r`(FsTKSFeGz2i8eU z-?Jt)vQ}j>zDN@wHYYPhBa?7}77!_q(#jjv?{f%X)9|`z2p6=c5rZ<9+A_!Kop)P+ zwh{xR1>qF|oXvk&wSPZ*5&uQj?6;2a{50EXn~rBSSMRBbNyknk+ddv=wdxFjp#vpn z&V9scR{qXBYHw`N3A4~t9^x}EM-*j^49XH=HW}=UHROpL!%=#Ht59>YHTcx!2vvRs zwyTAg+9Kk|b?Lr%Ixja!E+?`u=?z34xkEhUYOuzuWa|DSnTPIEXUKpbIU_dGq}j4X z;OD``7bOA8iH9G8XCBH3n!D%RQLC5FL>7VX;`R8AcZ?_SS4q3!B3O<*nP?`dj+*3- z!;k|whxO?w<>aQ`e&6I5>x?RX-LLA7S2(S$g?QdY-b;ue{)Wd)pImQ$A`;VD9uo#1 zFCN!hqQRm9ca*PhTfz>VT{k+Gu6yq-7|LhUkZLl{+PN-|K>$b{Kn$p+!Z#w)E4h0H zY>hPC0Bi3DExl1<14pG9MG%`tNrGcDUv~?I-p?%_N5&GwKm6(`s6n3G$%}D&L2*f$ znlA5(RHsP$-H$Lbo-jtoJkf^Pkd{jET{3q&zfo7i?t&px|A&G2s9E0XImVhXaEGu~ z=G$EEYL#zD?M|n2arTB7?IAcv^9QyQ`@#T$?L|gq!w7&wIuI!kFurakxTndz0#aSC z01vN%wIJXpLks>BMfm&S75{htc&VytYAmy&6m~hpU1KEcprE=2LpszH#Wf({aDMo$I+6VUGG!uokcafzMsS+V@4vfwVTnvMtY) zWLUTwn9F=lrP^0#MAQHLS?rBsbcK&20Fg`~uX{C#H)Uz~rj)8SeRUtfVQ?c_GQh9> zDV*AL)ff`desq9c-E#JE@whOSfhbt_*%erAtMI%=l+fWACJ*Vx)x6jX&gCgA$!)o|PW0^sl&d;8VWy#mHfRN2Q zu{_n!ZE?;r^Tg~>0egGlW*l>IOiCo+Do~%j4JQZ*8^86g3Hydmpv$?<=WxB_{+hiR8mvW%x`ogoud&BD zyTROUbs_`eae)5{3x$Lb(4zTANp`&@OoCj@O?sa*CD=HljLx}_y!Xr?gV1xEd)G&> z?I5tn6NOGpK2i4{m|^Rp5B;Kx<@<`{+lnmGhQewxxHaAqal16a?Vit1Cv!IDK`d?;U%ia@hx4Ay& z{+-(*H3GS{gPhzw;djT6E_ydPwi4`Qzh|DkNdqAIf1XyPe{0|;fQ`ldcMyDUkBhhM zKrqHLMaF~hG!Nr|X#pG!CK}5Ua;;$QCk09Tyl(t@v6Nh!f!p#W!q&PWs_{ElnsRKdutsF} zht$LXqWIDt7gPlFx0@l^pb%0Gy1xPt00mUr51k9;JtE=9$`g(i+agTIpb1B1iB$iv zrkMxQyWYU%-ktk-c*jgo4}A$aMfh=$Eej&(I8Y2__1)J`^No%>J`Qk~FPfiyoW&M? 
z#HXnO(APhYiQK<*^}o{6I$DA{Dkkk#%QSQi_?r=*y@p!1)p5@ z{_;9|pT`lt{DLR)c^!)SRMG`L#-a<00i5+fe{ljlN~<>-tH={xR+ibqRzXQa*Son% zdiGVm&%!-1Ik~`?%iQ0eYQu6Pqk1|uagd4_a9Y#=>jN=^YLTnqj?1q1p*+##gm3~t zEp`cy(trU#PTQn189(@pc(ce4(>=Lh{QZ=w0C8bIEjc?^>4$nsjX{3ZFmEWxg2sBb zO;8|l`yh&FiQlw$Pm%1)7FR>m$I&boH*gJiM_h^YXxNUWrR921ajM1u5x!p(-UJQP zo7D6I+HY?Or3x4#EhG$xgQ>~A)BysJ zliX=UC#5w6}>MvhtCf(kf*L?Wk2tG49+TXXFpurr-YgyW*$em+DE5GSRUEk4(OowEp+05;`Bc|8sWaD$5wP`}*8nqx>L9WffNRdq)Y@5QUXf{?jQMuF zJBHJ&Tq~br1!fmOAwxTj3YxDcxxK@=KfH4PiJ|_VW183IpEJ6F3iHny9R!%|UJmS# zH`dlppukK_OfW%_nAeGCd}=mF8UN%oO~*z4Y%2Zi4<6Pd;2t8&#?F>HyiJG|tDRN8R>T*~d3D$%DY@z; zF=Jk{M|vwJR|B2~{O2^g7lMn8jC@;{oUU7P!?Vi>g9wNHf{X46dI-k5~?%?K6 zQUiXzv2m6UiTr#122KgM zG+9e4uoCW-`a%k=2*zXk&dlNta18kE#EN#3_d1y}A+qC%+M01Ma*kj8Mw%X1V{q9>fS=C+V5c?Ylq9Uw~yZ(n^7 zvPFGB+P1NIR$@T055QZ}ehU+*Nb0F5r>YnAw=?LiNt$G>1z!gv|1k^NxLqfpUd7)7 z|Nm$Gb(Q513Mt`P>RuD54Zl!e|Ie^?z(zf*I^9O!L_-gjpR4LoxF_35#{{J&MG1)M zD0+rqJ0^&XfMfapQqOyzw}oD*VK7;ml=n<%;5=a(h3g}2b^v(?Fkj;r=O)*%TS?mF zA)W0+;IE=|i-2NiT|D!v-?%YfUXP58F_IUA?_nYWW{J$J_we>`1MO2=)P&>S?^1*> zO`l>10=lUqnm;?YVOg3MomvD}fP#6QvsAKQYuNL$hSL!qxHhb=w|&) z-|BJlQPI)E<1Z#Y`IqIhU}H4dTdFW>ZCuoeMU4qYn*li|8f3tviqZ`bqXY=>V`r6H zbm*d2j(40^PETJxdG`_olc~vFtJl1Oj#Cpvq`fL8N>K89Y)G+^!U7+>0WV&|R&J$1 zvAz#y$TH;J0E-VWZChVZgJHl_igI7Pmi@`^*l!H_1DbI{)T6g`_^bU_iEdw_-%}13 zX%P;ZQa;W9s?|IHLs;Ktfd9=?i^|l27cpl67CC`I)&m4nF&M; zm0j$JjXdaMrv*)?N~90%4UIDffBr;!6-`@gVXmf@T)>|pgO8~H+G?Q0&#~R#nDG0~ zc(P4`Em=}#WWv|^2O7+>*N~PjMwU{EQ3N{F$6Rc)U#cC==yT1da^qEU z!(#b(qC`BxS~;bb9>vKP?xguIx=MG+mXQMbZ2>X8$WeZgKX@4}5bI2N&P7ov3*Nbg z7u30Yn(UjQIZIV;42jLw`yNQYpB)r>n3~SxJ^8JCyOF2jPx|@u=l|W(03zkz*N$cA zf$UAL9k>Ea@(Kdl=|VCF((h-NR|&sE(1N*oLYixbh7&1cFEBA;y74hcOH^l(n}_Xj zd$!MecP_ARQ#Fd$rlfJbm!J`0h8p}I^LP#L6An4qB0!-D=)!hT)sZ%;%ZC})d-BNM zHVJy-1@UV`v=o2Jveue3O?jV zf!U{W@~zGf2T={R&6_mrMvU-aM>D;DMU|KWrbG<%&@0XjJH%X|?bg z)GP1a8Hp_R{OPxfdaRr3UWnwbH%>(yR5bKr9)5NoaMiZrEMsoV!Ch|-=e_A9+U_~m 
z1(r|SFZx#|wDZQJj<5AE{@&+ID*wN2ZZ7mhc9NQ&-iF`z_tv;s*YMGCjRM72)SMJd8YLpO(6S(sh}-CVO)ly`%Od_zBtCsTO2O8XCN(& zfK}c8kYFya{&26p$P7~jrebrhW0jucsQFutkjo7k_pJ{G9*;*~kF~!P_|hS`Y=Tgc zO;Fm<{HV@|SEr5J$0}O**G}`Ok_&zjm`RYz&sLdnk||g^P+l0JZ$#Sb31KDLdSG4+ z^1J0Gci@v18B$NNpaVk-7272WLULQknb3k|lCv>yP_o6OgoIM^-@d+nNAeAyZ)2?k zH*)!pnPAk0#L;n61Q_9vHF%(wgkg~o-7nKi&1^L^S5*Fbtk0W;aN=37`1(bGj6~x0 zPhi$GM~6A7qb|5oPZJfv#-S5&7HQDug%0HpH+5|a*KVx*}d| zI_Ut?A|3l)A%$G~um8DH_H+E7E9INF_bQJhRnPOg%#NV8#>o8wNJX6lGSl%dkG#?dDg=YMlD8{E963J=CV{(bAadR6lvZR|5=y~sCwCF3^%3V>r+>y83Z z(V{&Q)SDl|2Oj1x+$|?57-hUNKUGS;j1GBlwON(u5WrXI?jo7}RJtJNiu$Ua z*GUcbo%MRkBD37hny+6us;o8RhV{r_P3=2*ToV$IW^#)2#Voy(MSnnOJ|-T+Mc~Gk zf8Gdp762R>pE8EDjeWoJosxdv+Tx{=Lj{bk0WgB+T4#Kp{vlrfeUbF%4iF4Q*nW8p z@*EG?77{o(Wh`qW65#gVV8V;9jrr`xRp4I416&w?HkSLBwa%39dB>Lx9xDE<>8>a* z{VerOMJl_I<|i2`vHUKSgvnOysO`MudW2mz+2HOwgAie(g2`D8rtCe{c+a17!NXk_ zi;CIiqYE0$CE3lbPTE4Ix`n^M-VkwJM~rJOlCJVaDxxtpgCJU98;7wxkZ8tB4GqtUD3E>n15j60&r@on3z*?cEZO5agHir*t!w#hXgwv%W-B$|M za|UOXWvLJzvSecj>xq4eV2WZpf(TgX8GFsD=Oe&3bpc*W-c<*DR}!zw2YL7V-NP%B zjK9Vc|5jUk-rlLQ|6v~gTQuj4Fx>z0%IsbQy46lm?Vtu0GZk=ld|dK-H;Gr5U0cFo zyx}I*)nvu~73JwHWKrM`9zLI5L`&^T&P28yNrmx+{sQ>?|Btk{4v4z#)`e9N1nDk8 z8b#@n7(`MU1VLhuMrsJ@mIeWV2k8#!p}V_7TDpdAh5?2+KlIt}dEc}5KHq-7^WXS~ zqqFXHuY1L{uItu~AAD98yGlMh{m9X4UBi<&R`*7J*ZJmP1qw#G&e^ob;$fkYe^qVH z9TbnC?(}w%2XruwurcXE9y3a|Cgo(NyOrXF8yed@8XUza#33n-lF@J5kbYB$s8=VF zyd@E){_4_jC^|AlvTuFn198&#^t=z$r(|p~ugZMvf_T*f1JN7_kvY#=m&DO=ZBwCj z+qEXdQ2fQ$EwP~3eny52aKMkhAaeI&mhu*Vv@q|IB3fAY{ik;PrB5T#idX|4#PwyD z@aVj@&kyOEp3I~iKNtRMn#vdKWT4{w!JPzWxDDXx5G}nI#_gVXh&4n{giZf^-${NB z;jCszxVxSY9@XpH$3Dk;7XU~}Vm7ZadbMoa+))AnWn@BzFu33#{afwf)nxR=bxa)x zW9sialD9{95v%yOZrbn4zJXXIUi6YM7}dA-iI8L5H;q-{1&I#dEE~ZbdbuM^R4XKM z6!Jzlzc@RO_i18AO?r*a=Kj8vsVP4QQ9HcMu6IHqyz`-X2w9!jvj2{~{sEu}%HD=t zKqv{=t;x(Rjv7)ZiDF60?xE5ETfM=RKR z!Q^qjgXalYU0POt!vj=!aTPZy@+f2&akZqWT{1hA{u<8{id#2L`f3SIPxXkFBIN1Q z@Kw^(PnRu?xHhR?$ZD^^*8S5{o&<$_7_axi)f`VZodnr2fX~Np5y2Fu=KrifJoRq* 
z#J--`EFUu>?oyo}bCe?|#5ay+2;yDKT%MRg0RXQ%VE{Q9dBPV0U2kp<#_uoRke)Hb zE)DUeW4hVNJqq2RW7>C1qM9JTRCQp73sI%L&{Iz;hyzGq719t7*_~-qksG47E+=d$ zIgj?WJeuzrP|7I55E|QUf4Q6dZueG&uIUN>rac{e{X0KCd4Fnjvl$^lhXwQ8aPmYK zK7X$%E}TJ64u48M78&mLP2pSsIr*i5hm7+6!qAZ7oWs(+CU_BKS??CEceLSkLyG12 zEcH8}_4)IMu8*l6Jkc4RdP5GnDZOnO%}man+A+aZd|`fK(R^mn5YZie8VBIj8&Tiz zDg>^4HjtVa`D~$?Q4|(#9pgw zO(F)JgT6^o3i^_suu{QdB(y{O9U$lLQ;nDLl$T+C)rxY6KTRyRbpaMuokT zaR892`duXmKsbqULS2p_p&LO%*8!B)90x0(LI5a==R45B!wlvVB&&nIEQtG#%1(7w z^1pmH&>!4Kvv0mdHV@zOh?1mx6+`LCPo5G_kss%*fUr&xnLp|&ofd4Fi|h9V8;-)f zd<%-}Fk`{4&ma&4(25BtaJ=HXxdgD;sq@F&@(K#^(n2CH+1c$q*?m@hrQP_w!6AV* z_3A8&q~;+ZIXQ-v%5Ov>+tT*^l?s(YjCs9A$N;F9*h@T#xt;Eg^Q_3^`l;+vGNxj+IB~;*Ty-On9NBo)M&A+ud0R@}XW}Y{MGk3VCj(V3f2~TcG7H zUo#eOHM$mCY@NX!r`7}e)}3@ru>Wx6Qn9wJ*bhz2)af6v0BHwt()Yyv5z#?0;V5;? zT&+49?!A7E{mO(VHs?}HSPJdrDgOLik8vdhvo!oL&WhES|H#w@`F$VvJK0@0>GUUhjI(nCa?t_aQk!me&o4?I+tJQ@ z7J7jjAg6BIkBBI_)Bx60I903F~B*rnGmiGxp<|QD0-PG+VykgMNUrF zH$^wjzceDlcOE??Mr#BNtg(csTRO_!A@8=9-0i+spZ%HYT{e}8bO48_mJ}V8-^$q= zjIY$_+Db;e^_VZ^V(Jvkd=)n1B~R-!$VTjJoCEM5eHBmlnOW&qj=~OT+;T74Yf@a3 z`L-g`;Cxn|+P-lU263Nn2)$dS$F{0#QYDQY>gA^Y13RFWy~&RLTbDK~JDZ&iBB=z6 z@O+Si3cESPy~UG`00kX*9Lzc|B5&@qyDvXJJpCcs`0%rK)0*nlukw^oQDo?C7}OJP z!9o%WOBrf1F*UWFF^=Wb0sRvBh|>i#BHT3&y(qC6JdjpKaBS*gp^ko)QpOEXJ`7$5 z(_miE0niccmC)I0DS-ZyK;&N&+Sc0XKxba8^HQJw!i2y_53xF3YZ8O-O zU@4#KcBvDs-7;x<4|QH=<$T2({_B)Xwo~}cecX*Bv}-9Nlz7-R_+h^6#_C#0)9(Yq3#2FHC|*zKbtms-I^R*=eouf+RfBs@YiZ! 
zJUU$sp+Q4OAAC97XogpQ*JW_dx0&KJ#H$)ii-qa7d8$jxkCkk~)3;9aL3nTc=x|mr z28IUGXLJY=?PPdi`~8{@M5Q*UYGHEtSk32?%T?bHITpC|xt%Ic$R%slNH$WtWo#cA z#`8erZ1^cKDc_ZfLIq$7qD{fjMPQPiN+0_+O|6GEHslK$N2b`+0}v@7GmPp=ESc2r zjgmhbCd1+VZP?dR_-n8#4ls+g;3fnJlcdY}0%UDb_s0pwKl^e!#dHwh=EFkRsthQ` z=P>E~8PCxrDZe!25tlZ!9%0|i@zC}9*3iON1;AW5PgjWnj zffNr&G;a{i5Fo6K18ma6!F;-l<+MmQ*iqg8r}_WstTgZD@Y}m@o78_(zwx2`p8pL4 z@)#E=`NZBH6&#jExD$b)QDBFl4#@z=ID~D^)!1bli=AnP9Fu^F9v>xYy(fO=K;=(6X?8sBeo?YI9F~2K77h zbF;~O&3LNTIh*^BGuz|HQ@^CpnbkjVvg^FpaAaI#Ho_YM?ol1u4@RB3&ZSS1p(BW(qk2qBb<}n+m7WU{sH};L&=*b>rshlOz z;vl|z=f8ITf3p5R-4ui0#acP@#ryw|&;Cc4!j+Hc_VseV4J_S|6ZNK+`RL#

wik z;qKn>(A5on4$B2FebU5^<1;MvWVMF{`HzCPLfF0a=TTA*$JN3LoL~0Hxim+PxITm& zi1CAN>x8B`OOuWrJXW;)fY-dLPTMypcVzDQ1F&d-oo|mZn(h3~fkf7DMDBDKG}JNQ zY$HeP=Nv#omV|@ebOVQS@45p|59^tQ+7ZDt+{Ircfc!LMT`X05pui^Biuea%wlivs z4#6wVC$dtbWYDx%AB?AG$=LN>Swb&L@tvc_Mt-s?`^QK;!NjiU^3B6|_Yg9fzoH9+ z>r5nabGeFiGQG(YelyDPW%=G4TH6IN1x?bau6~z#5Y$!JQWzjVP<2OK^hbey$< z=?VknUlwL#a1lyZu0?KDO2z~Kf333qcOUPr%0hL8HlAq#`d({=*cpyhYI$k`sR(in z9iIhHekv4{AQj%i;+FN#VgSa+OT8LX!%I~4x$*2JC#On#0)`8QPHdoTQZ_*FxcvH2 zxFWXO&GFfL;r134e$(B6tNY;}$u=EZUA{iI@i+`x7t?!9^{!i7bZ_}=s_{s%23@UG z(U&Rwt#@phL+^O^?bYzD(eyCm#oxbU3u$INA`U$ornS)B+oK?CM9VI1ob2b$Ehr z!BN>9#T=^ZNt-{@gLr(@H)5z?z^EuIR48bhVSd6v0S!R`jos~C(E|8zdb%@x>`AAk zmiu?ht8RTJRFNY4|5!C<3iWGKZiU%sSG?6u1-#xGiGFBMlT zUIB(R7qR?L^mFB68q~0_=Dob`iC*oB>txHt*P=#!y1A^LoB0JRIA~1N=Pp8oSUsrl zg2uAC0@xdNzz@d~7Z|?3>nz5f`sfWFGf0_y2n=ICLW|pGM7?JWaO5(@HB$;dmVN8m zc7AA49<8aJ)MZ{20$bzw-W`D7H_(!-xiX;3nj_U_R3cT*G?GaqDLfr@>qIbxJ$iBUmu{ohKMiRW=nHE4VQb>O0@jiD(}H`-=@co z_TmEyvoJB}%evMbmFfEc1);wOIBeLMgXp!PB^kUI)5Pp)RcV>ZsF0}t9gZmYVxm{Y zkK11OQDWb>JM@uzUer{KW!g=A*UicL9QS6h$rT!sT{W8`#GdD7TZ!60`Zaq<2WX#&9RTeUDl8U3 zy8E~RbeK$NzB;e^9R+8uudg3T8srYX$WVQniuU|GIz%R-gFy}?s6k{ zQ1hd5ZvH^}pxtNEv>(06$#0d)Oz6_RefFR&0(ASd>9n|B%WA*wq7-0f*K?Y8m^*N4 z+|i{P1*D9RoykvK0UDw>J8$)Kk^DG~WxHT_LimvZ4);10Kx$N|7!Eutt>#v!7ANn_ zhU5)C6^49t#j(!+#rT8bWl#SIApHn{t%Q9GSi&b%lC?Tyio)F;$h90}(2Y*AnenmC z@sBCL@l)^=xU?HkOm?anwou{%LX&a0TL!O+PHd3goA#PX!!}&rGpd*qpX0<17-Mxp z78byF@zJ{J!o^bW*z4gZuEotMj@Rex-sSchY;zkAMfCPhj!7sg-8Dao~Njm_N_R)1z zCfu2C+bQA4Kcd5jAMsF=%%V9MJ)6>?Tmo)-L6H3`difd5FGM88IqqHf%Vht}$fKh{jn>+U2F3UcddH{0H0DZCct`7XdKxreUy8DjY=IIIokUC9HZc!HYn~}Y5 z!M&=Pr#_POq|-7lNY#HGjTP(lA`{x=anluT&)V-t*j4xG8 zyJ1{Q>i8IO0F?Xthcte2I4$uB{UA#Y>Kncvl){V>dnmIJfiO&WmYmFXYJ>#$xb-s#T>pJe{D@`@32 z#7|)TIj+_039kcKCAjhMFF^?2hS@q=bu-)Sgmxs{H|j+t+ZTUSoLiu(JgpI9 zpV@&wL3gETny9_MeY5#1aB4%e8Kl$%Pecd$QMzL`r;#9GJ0;R^;a`Jk)%8EnZ;#oh zY?=X*wp;T2Z9nNn&EA~}Vzh=jDHUvMR2@Zz&e}^};#xDZ6M*aerMzLCaF8rhz&Zwf 
zUHYqcoq!@@yb*?MA(~e>njz5tD~UCrqzJ;Ah$IKMeRRN|*g^}*)JAX23yNUGJJi`eJiNy?)}k{f^yt@qi!by(?C zNo7S-S!ST!@#kvh)q`8-M&?sDpIu`SD?;45_HIYFY>{+{)9 zpGlU)YTW$9GJpgb&T=L2(yAni;i=%&ZQ)xbMNPpbtqNEBDPlqlmH z*PJ>0hXT=-wvTAZ2dm0lpvXia&XFDO+3eTq9XFj7dQ@ZURIimbXx&#S^|}|Y=j#u@ zkT%?l)paCCAQx};7A5qOus+v*nGz>%$Y4)+prHOuWu6T25Rb&;dBB5QqZJm$3{5gX z_4E@OJnuPEKv^7B-$$2>uYf}GIJ3l}B#m#diiF*m&P>4%7$?uDH-CN5vC)&8R%5lC z?I4^9d^SG5mbvIxNf9Q`z-jftm4hM9@E+d1V;JUKc@7hVw z;RBCKK;Jt2#)u0Te(N10-SqoB7ypM>jO^4poG}j20{!Srzgd}9TsxR!4F9RG(+|9| zDc87SR_dM9UIVL0$tv%n+J<>f?OAIb&-@dfNoZ>9Jf+wH3<I3tni0_XymK2NjyIa^*&c{g{ta(Psx9e!-O*F20Ku`gDs>aQU$-ujfO zi`nM3NVnz;y5G`>tp6sk9XovDBfl2=_I5UOw(X&g_~8UfW$&4lBBR@^+ndGtsJ+=F`7_58TrUT? zDhU05ce{#w79J7-oGwq!2Ii}X8=27?E&E#U_SS@^^20^+e($Pbe@~m*FOs>*wPb&M zdeh4a#+DoHNg=5sN^oe`=ND>;36R``gwJ-%`IAcpUN}O>F%0L{goO z&8wu0u+VBJdn!MakbVqxTUwHl^q_TsO|47Gx(~-9&Q-myAJZHVyJGe#% zpENXY+~~Tj$DGeLUu(}fuZCd-i6Uo)kB{%hXzYP|w~meeCV<=FpQp?#XGeNipgYXYG!5n%L1uBb ztI9nRqk9*MpNw0XBKcIAV;kQ8r@}S0d4XcHNNm2|-s2mOmSaxZl-;bRAKWm7r%u1S zaxY2Gf>;@?YVN>D265;I6bL;;w9pi{y6b$e`)OJx7f^t&hY4<5|8_@M!q$M0dg3%grB3uZl` z-}HPJH~5NL4m8Nl(XQA~J2uiZ71bN5mhr}2?(si9qxRSp13~gPpHNZpK)XoR9ia$D zA}&%enIQ9`-mHhdT(ZN=fwoZY?9iXf;;3Am{O7LhrCAHjR!0ifu2tTF-2cz)Z}K%X z#?<2d(xja7*XD{ zZX{v{mq!El=Ohwc-?qMVTd>}j{sON5^XlMG^E{W(J85>x*u|w=Rlz?ZT;sh5c_Tpl z-o>We$CJLP71#j54Y~;0em!1b<6j)pUVqDqo00GnUpYK|r&Z`kPw$-P<`OP+G@ys9 zBpIocPeZWZtgzc$!>2OK%L(sVn$8TrerX7KT&>bI_??Tm| zyTTmZsQc%F@I zsF@Vs2|v-BLvZp$n`6_3S+qLErfhU zT56i>A{N}(ecPK|=h;xNsQuif7(WRY(&CRs5-3VLm|4e?&wRWG=JFzO40fUD*jq@Y zv2303LUt>yOh8?UQ;xoxR(KaL%N@3nHj>?*-XKggCf~BCZCPP4)sNct@en!H<@=UlQ-+TxPT9b8n#31vz+ad}h zl&3pM9u4GYc9QI!{6XvKeCmyVO(8eMpSUl?id>WDsy)9u3+pDAMjBF0UiK{@gjpf{ zJxD$)Ub@`x_Z0ahHt$#kv41`@Tjg7~XSoP7Dy#kz+rq|s8KE&iOLKfC+C3V;!dbE2 z5j18!_J3e1OD*{MQF(c})hw_7MhyC^odU18I+f!6BGV!o)>cNN0;o!X`j4qGln_it`V`f<9>C`eZINZ z%QQE%8cPdKiym=taZ9TU$JuOW(>7tE5Rz7)??G@nU#R4DIo5EyQ(RA-ukF9M+>sAk z?g=m#_~N%OIl9_u$q}^v@sY9Kz07q?ja|M(g~GyO1D8+aa-nj&zd9HQN(HXKqF2;3 
zZ*HEp`QSZRxX1n&y&Yi1;N>v;vcQ)Ch9)9il^Kh`|k8T~iC z-j(k9B=y$4-D|)TUZ4k{RIFy-1Hj#Z2I86KK-HbP9X1rIrgg|-u(Gl=Pa&=1H-4Cy z;Jz5&vg8Ofy%|a)GA}ZhLPv4~gWRBL=sKuB-i#7GE~DX$^H}U}@{91;HLTf4apT}~ z6wYEy##+%T`=$J~-NfU(od#yOaG1?*&s#dX+|jV#Y4+65uRAnf)H_?5th~(^Br3k( z+UR3qKlnNN{=xVfLqLdlkaXS`e(!J7AiApH06GIukoWW?u(Qm(FbgjAir~lcvU?CY z$HR4jffd`usry9Q!H8txlxyy7{;94HYZVq_<%8-t4ONG5=R>1dJjJz|e157C^87$E z(+_BZTZ1C z5NLkUvR}N|OgcSz@cr7g;*UqGl~r5)d98bmAXC53g7Uwt9aCf?!oyd%Q!fSrZqpc> zft5&cX=!vQq&m5x;&c*7SDU>6>%%9t$*FGkW*7aHWyEFd#hjSaC>W2<5$I7Z3)?e1#aO**QX5`JL=a9k1l_KPu*&%x7_6RRTqTD3>j?6UT zHknwWFm*A|KkS0>B!N!tvKd!MhVr3D}+F8~7P*uI{epE44LLDCsP zNZ%fR`k;U}G7^uD1N`z5rqH3?-VGrAgf4oFuQB~(ti&ALIxT(E92C)pWY3h^qw~ZS zc*#!dxC@jC9BRg}fyOCy#b9DG;S~Q@&1qjVO=$_86B`*x78WWS?lnfjB}Qo!!3>hk zu*<`Kl;))q&mr~URjTDGL7NuuI>#WF`{LlAcP5A8{y&%;i*O`+8(`W$yRBg)e3Vnz z?>Gze_)~uYs7)c}%Hl-py5QF3*~N?7kqJ1Nb;FTR4J^T{O%8l)5UyL|UA+E8C?#;d z-suGSB5^Rm?Y^)`9!Y~P*fLH|sj$dn6%ca_u{{I;X`9rX2mVSYPj^o;-uIESnUt#a89KP5h8;cs+4+#A*O>6I_)Rc7 z_tM0%LM}PyVDZzGuk`pQT*?vB4ifYU_uYN-H$(M>))>3xN8FaeQ%_v-OvJruqwsaDDnmi2`>~Pns2Y?x*iv@l>njLa15V)G@V>#OZD-Y| z@4ZH%=BL6mVKpgX0&xa1DL*b}fZmj5O*6oIICnZMFf(`UF-vS8*)yzk1{enBJM9Ne z6_AJ=TYsn?f{qYJpMkvHhUe?W%2V@Ntel^3d8AlnOiY2*nzuZFVBhP0DCe%aDt2xy zlR-SKQIsdj8B+rm!18Qogrkg$-B|O8Feq&Ab>hxZd*Q(RuGOh>SEt1zO*c5aU{}K;!v84f`Y=+Nj7+P5kHc+T; zgs!=nWd6Gz#rz9MqH2!1C|`YVy+QGo?>B}QSDdcidDZ?~kqg%bfPakfkt@i3iTQBr zxelA*Yd%R^L)*M?S3_cdcb`nsT!YxxkiYWnJ58IN4^**G;=hl%4i>d#Za)gS@Gfr8 z#3)G6+z*(Q9L2*~5LL%TcGQ*Zw`#Hksg6V(sbNhRQ4!8Ckg1T`jx)OOh zJ~^L8)WWzO#GCGkgP}R?{oedN#P#ndtou=&{qyVq#dM9skab10MjSAVG}X6`v{D&Q zG9sW9RZ>8LGg9%RoeiF$|8vzD@^CBV!5%cm#xLUA9M*y(!AiQ-q>!sbe*Nz74Qh0U zXRH^s&2Ts8Wt!`U3&+J4hx0Ty4@!-~4Oy~=ynqp`)C^Sm|5AqDm7r0s84S1EK$Q7A ztuT~t=o_!y=j@NyTv($- zTiDqeFDIZT9IjKzPAz+O`X4{u!&$z$xxW_LK7uinVEb4tUJj6BwS3ph@H_@zO*(&9 zhpAt^8P)TXH(&m7*vLn#n|nmD4`SP&RgJyM1R7kq$5TJZQOecdh<$bbj0Gqs$G6G6 z2uW}rj>xiSG49(O?>pbW2|R4V^(0_-KI*pt$i}I09ogLkJx{htR~#Uo{GgN6#V(Wx zNBWe3^wQ1^t?jEQ_u;GC-A0T!Du-l%Q2mOyjqxd1MTc1DA^b;iA6vhxgnvy?UooOA 
zQcfA4NTrsDh?v9r-umHF3bAjjc>CPnz8U2l)E8Vf<@|`)sy%RXUAR5i&{h|ji0(5Q z`qt|X9zOBRufNoun3$NCyzDduExHbDw99U5bv6txa70u_)xGD0a1+98->XhNhgrqN zFWYx=V3j)`#{8upN9-R%BzQO$yg3G1-&$fqT0lh=j@mV=KU7myCaWH!g zQfFVXdKr3(K5xBOrpQb{jhnJ?#ld$4V=6>;eGpg{VP0AC)tCsKR}WkcnxJV*yj^Dn z)PTHbriz-M;3$y+!eW;N24~NhQ#4a0y`kLjxZ%K<@yZaF#72E;3r2?`g9Q_&fz`sS zV)cPp4(|4i5PdR}qb1g7?HsI_tj|7wG$ooJN<8ezhDXF&W)fm(uU}IIj?R#y#V$&L)CMi+6Y+UzQ*AIS z9OyD<49_zZ6lg$aQ5~v|J)e~gNU)dl2se#Bn`IwxB&e$WIm^qUEv#G+2vbXJA`w{j zBg_(R1d1A?i9KLj+gOr4_`@@gP7m=@Rdq-U&FdEg;dHO$!oq^fCzg)}|xIx7zRcLUZ#oG+AUv6GNESIxHDn-{J^S;q3mZeXLz= z-Pwqk^971&9Mfb6641F1?ovrRPC;xP)Ds&=WLvZ0X8tx9;k7MPbI4r1ytN1V3_snH z9(du$82RP}pzwEcQS$qimGci67X9{<0Y8_d-%dnnVsfs-iPips+MUsu%tTV3(I01* z%!&{c2p@6`?SAIVB5=T0>F6vvS>$DsUn#Dz#ON?80FOAD&t5&P__(74`l^v!g+Hp_ zSQcyD&xi8C66`craqU62Gnve3HD6#e=gJ(=c~fD&HfFw~0jioP_g_h6D?ljc`SPMz4X8S~va z3j37dw+K zM)f$r9p_v){Ml%Ifmk0{+YrSaMAMeeVHcl7)WG1F2meAhsUn0P)r>DGDHtC`T+a}G z(0l;WaafNLfRDekJsp*vlGBX@gfMsoR(klCbeTPgZs8LLO9-zTT%jibrUbqz-SdK} zi`T!*3~(X4e={tOaBDf^Xvoq^msxWsJ+$9J&7?Tx(W&KSFAxhms`RY;{>AQlOk%S3 zQS^jVb{=i=;7bCS^60Ep_oWhbpc42ZX)?l*C!$CrB|xL7i9lOukj|+R)Rq5pV*3Ki z$GmX1alX^p=^o`QF;uQcd#@5(4fZuy`&dR3EZo&f`0{ z5j=I$n`+4FLH)VZEx~;lzN+a#&6m$$+-dQS|I;4r(CpWS=|wY7?xBKt(uk=n=oWnN z;+hNG-`3v1WqH{ds@n3??wQLHEV*7}IB~H-3v=ovu88-yMIV`w;BUjhPlWOu#C?gH z7%bIG**0z4SMA{Hi69uo0cCkDl}2m ziWi$X8aDQef8hP8q7aiuQdKkVDzmnBhlegzLcW$oOmU^`=!YP1==OBp23NQT-!SjX z?;E9HEQ{gNANE4go4n1%`;93D^NWFbnxZD(PxnaL7Vze~N{5%PFV_@o*z6$sCX2w7 z5@5cm&2F>gWH>5jR`(c8N$2GG?g}S}j*b?9#TjO~d%%LF@i6M}9@pYw*QUd_t-h%@ zeimAGdJ40Dx@##hVjQ?)?QOB~_-2?Y zVYL^s6lRUc_Km0hgmy7c&5nxfc3iIJ+}P^Ieh39;a*a(-IuDFj>DOTu6(tsJ@^;0C zL%)AoDN(FY)Z>3VT98VZsq1GeujNoy>Un1>H<1W8O^tp_Kw5&%31*+2tXFVNw&8hc z7*O#8{?O{SyC~3py25I{bQp21l{A2M!pZqK(Dj%iBi0o@6jK`nPVgJN(wQ)?oK#%y z+(?z+*?sfImcH0IYmQfoABeYv#a+qM7b_>{J5QRstA|9;4}^|TsdyB&Rn536S0lXm zhtB6>ZLR}EX}??kOiiAl(TqA^|My=OVp8#{sN9{YEE5mOgNgwuWi3h4Oqf@LQfDq& za^`08ds?kj;uo$vsFw@F6ClDyd(UPP=JM7n7bB9PRmx=5hGJ2qipStwT<9jR(xuc- 
zCqRQVkjArysDi!u6w4r=vNs6byvKhd#e>9arUqXF?84Y|=+EBsb`}S8z9g9EHX??u z52swbYdA&=AR7)=lqe6Rit~bk&DFi6OuP)=HZ{vVb&Yo&I7s|A(v6DajK_Qg4!)CC znA=>GVzt_iklBw}O}yTt1Zm(?bHjmQed{M5Cz`)=QC^tC-Nz&jz7|o!R7HeemcNSN zPMH-6Uf8WpBoM`cbvRiU!$p>

aoy6n7D81iTcZVp4#&W z`l^y}_Xjdw+?3sg-GA!`e*PF}^eg_nx2q_q50sl6WZk5y$m6&G!;r=%nN9^qM8(%{n6F4F)}N+xnoY~T~v?l_$qzwDdtf9QM3{` zOKx_iN26dPk;!#%Ln8I7RYeIQ|EQ5=rDs{WOYx`pL6@YXIjvyuktfJ0nos3doq{qP zTJF~)4j4I=%#+hthUJKa%rHrrt@-$)aHRjeRk%1J(ZwE*qxwOpc!0XiSk3!YnJ zm6)>^92%o?Z{oYEV(PLtbt}zdh-w0Y!?s`@b+8R$WGzBDl}Qq3h&@&@jf}8tTO4hu5!6p6NNcHSK@Oi0#^2Pimmc zn97af3x7Mz?DZu~J1HTHKP>q|C$fR>l9w`K(QhMz!Lf67UPV|Vu|dfX#GN=etly(| zZEK5_NWs4vusYBUN<~=Ma#4OY%-{%ZcXvIZ7*Ib=Ag7`#31fW1?B2ZB)632qzZ<&qY{tdOB^A3oQP7#7n9F(HF9pA8WEL zF0R`pAyN3}Yp8V_#+3-M+D1%UVl@MCh3qm+CY%_pb}+eG(KB+8LT4*3eOL@^W1`4B+os zPh7l$)xjBjKIsh{w_ky{fMogcKVtkpelTPE3%&3C2&Y=A4c5Wag?e1!!`FL!l@@78 z23w#VH0c41O^QSmca#LTvg(k)H#y(30N*sw_K%{aW7!6$Z~*N_rPf9&pzE;h+B547 zqsL&Ae@bjhYFpup?`uf6m0)bDEoxUX{%^SaHR9(!lHPV?&Z^V8G05TAro#$N3g zBu79aRCj^@x$E0=RaSHpmyZqIL;*m3r2miVa>Ygf{k;jwZLW?0gH|4tExTyE2s-bi zT!8Mn4>uLfhw8t_{O$P25WGjI<{LeDO$?AbD(L>%W?<=I=eZ;HjDwxn);K|PgGSq~5F(ShGT zr#}Nh?88uWAxwS&(D*RLGPr#D!pk~iVQCQrMv3Z%9eqyxCC+hYC0Xy=iPnGq!dAR{ zL?)g(_LnHCWlgF@gk~#z;-|+9)~unw1Kj+E^26c_s`!(|+v~;3Sq4`GgIid4jyRI< z*DJExr7w?9r2)4fgSGhopWl16ni?-Op5OYdFmyCCHs3yv80F>07h9|ptGUqV4^LmX zB3{8r9>4!k?^yI#y_6i9_?c?qS}`%t(B_7P8eFxLz1sdGkUQb1o$EdMlLq|lA_H7L z@j_~ya$jQdT}gcvnw?elBfi@&pUI_upFQ%k9QdL?T0hJQyxDY)uDNthbtBoxF2Ve5 z#t*`j1C1Wo9|rcQfbWe%!N1W!|5N4u?J(u?agUn;CZaP6l!ER7&Jx!FD&RwoDckC! z|LV`Er4xoQGEEVaaGwdoF~g7WxL0WJBjW$#gTk;*7yiHe+4Fwrf(K@=*6JKk?xnJ3 zBG`JTMf6iTtXv}&;^S^~7!p6}0cV6|pVo4_{qavZB|tUg6EpMiCVhAyY@t}}nZ`4F ztI85)nD(gL9<^D0XZ{P5_Z)yNp9M?2Ng<7@9dm;#dJSM|DTz3RS;6Ss^75Trftl0d zm_4Ul#6{zsMM=>9qd=v_3AF!ceD^F~h>N&PrC#R}6*72`qGS?x=t#0Zd2A)!-Tp$U zCc|m{jTPxvaP8PIaqmK-`Y$$#GlB}QObpO{ z1iP(8Is@Iqcm3>M7jhm%h9;oJymdPN@BOxHPFOQ?A7proYatx|Sx^<*ffp}fCquxM zc!717n*-n8K0pAZ^X+y-jWMzhl|UB^#lT8Wl5H2c^D5<{CbOg7NrJD;GOzEH2(Se9PKDmb5XVvmZrJ&N!eM|8yLkM`q*Iuz$R z_?D(#^ns)57@P*XB^JF*wm4EP{t=Y!tYh*WC8Z+&A)Iassw_4O?yILt37ZtG=E6;{|be|2@(ZrkRz7zojGf3`n! 
zB``P2gDKL^a*C6gS3nbgX3fC57ew42#KSB0_6NQq<5AeHE_3y5zurxTD5YV5B)j+v zSgn@)jKymtPxa>|N1V`5Tkya7B5U}?O|DLF#*`WK^~@qiI4kk97~RC~_U{yxD8IEN7&$`Q%NlmqZT3o2cqq zH@w;*aMoP(mDqbLgP@!Vys4tG7P)`G0*>lH6A@*OWH)+)YVkt~&Id6|R?$@4u<@Qe`= zZ-WD6-2^wZPQ&XShtsF?zi6w2`>L?X{oY~t5pcRMH{ZyCx>Xpad4T{1F6XU6Z(CGNM-(Vtt zKRDq>itF=mft8>?#lmXbRP+7Yqa&r2D4Lr`UQIh}4J8(&*Q(@OI;R;0N`)6Zp04xkrRy^rcrr|pEF^f4q5h6z z1-e%qom`6jCZ$AgiH>DLY`5$_5H{^4tDeJ7jb`h!))X{;eS1+IWQ7^DR;H|q;YXMnMH6)Y;gghDj-Ut6!hfAf#m<`q%q>Y@az znJ<{y)1rz^JrY=~COysxNx)z6r#6vWH)8LPjzywQmZNnrByMdH`wsy9G(3fIk&hN zu{T@2nv-bV1~j`S5~h>;!~}K^+e=}MdU;P)7x}DxSg9l$UIl7`Ld~LO*yyv1nH(j{ z$Y;P}3e0I)tIDc$y)0_+K{Z8vxp|N1@^b%bQrnW_;{Whmwv=_f zw5as=ue!TM*&^I_CW|f$j0b>_|LXtO&zRaAb{pKn-LxPHJUX&HzxKQWytG3ywYiDO zd{EVCB451Z@+G9xROyzg_>j zUm&{s_@Zy}am6})qi^7P>lQ9YouqJ$A>h>m{45psPpjRnU3l`((cMrykX@$&rvcBW`G zyJxP7k0JgxAdTV6qi$biqbsiEI2t8T1-AD7LP)?9K9@xq7LpOdal)V9Rkyb}*DO1j zSMY7=vs85+g&J$lO=Yo*@T*^MtGB4`2jGA8!&CNs^xP94O4K0M>Hl#$7}s;t_kVHr z-r;br?cVTOB0-`XErMY57NUh9(HXra%7~VTPSnv7QAh8Ik}x`9kc>V=4;7E?+0A>oBNA{4X(@@q zJsOKrYl~N#qDWP8i6JaAwqS~b*@-ba#xXrc$xXm+^Rw5oMU`<%$(EXgE`AzIBP^d! 
zAsOl9REQsXLf!Buq0*UE3(H3-@0}-4Vo0K^*do54;x6TW!ALD)9CUbjSxU56G*gm z)F6*kVs62A9!s7*))9P@Fcxo)SGvd3MYKsAb;sw-efPj04L#Qs0lJ!sJOAp-1K*e7 zxn=9+$W>$W_dd)Dz{DpeCwKOTbQXYX2sq_{fo_4!)px-~|7g3NqB{y7$oj+dsHJYF zR&qA6o8@&MmRNl2?d`oaOz76zU|1IzW2$tvW14)dQHU&-Y=g+H|8Lv=#aE?)b}F^U+6f`yj=uV~{#Za8A) zMf_8qz4MDLM_@00Wd6{e&2W7E`A5ic^$Pz{%)dgYuNvPC{^J(uzgpl=QpZHcQ!5)A z9USowLm)!+gxpzfZDJ*5QPbGwL)7tX%h@fVvmdop?nBvQcEOAe5AT}YQ<|3(44;Z^ z&~l$EA*!8E!^;cgDP>Y;H)5U{|B(FhVCFK`G1y&jD|j0^mMufY zR+UM-CIW#X)ps2>8A#tlNL1Cf97595*b+@?g-LJ;Y@W>9j3k;Q77d$ZV-7pN4-!%e z5^`uUO2UHT?;Yb(8mwbt+~sY{rX`eIA8l9yIg=Rv4~iYgzUkL3dR+F@Xe7{0FFk-1 z4V^GP|9ir44erXc%ccTrwNv~NW^&Q;Y zD21Dk*(>UQ-OdWw?Gb-}+JgU`>Qvy?dKwg=NFfWt2Q0fE&v?yr;9bvUYnqS6+Tb*a z#awO$-^5G3c}Zj&7L9mCD29uT`}sMM8tz+kz&&(tUhCC6(lx@Gf;ch;3pkg4b6d7K zr^|^FJ!mgmG`jb_b=XMV>Nt(DO;PvFGI!&uHixPG4U)1d z;|H5!vjyq_5Q$G|NGWF7Y*tNgGA#?|?SUj0 zyewy2%TtGzHm7bf5U?A{mPJoD9N$JAqo(}V4N%Y)xhfp@sGeF^(nf)UIq$Q#8N=e> z3N|lVSp!5m%aA^|f5G<1;fu7!Uo~WgBanDj_YNv4pknLtfcAY}i?k!K)*Jgzti*rt z89#~5{2*&3?p$N?PyuYX0EUQU@jRRVUuvE#dKa53=65 z2TDRyrGFY04|NRG-dmKIwf(+S?T)(Djzn zY5X?uBv{-u|2!<-v9)e_c_ZN0{i6AXkCty=umm$iB@hYL^YHbne@Pb|^JwJKM9LjN zL5QF)w(L9w?B7%gzLm&{6Y zx`bdsEw1#&%pQ>-G&3{viA=}4aaUzLd!_XvZI5|U!>_%g4w}Y;Rq=nP6MFZwx+Mx{ zT@~5Q8hi%}^%oqiI2VX}pptpqcgKNw8@RiJcsbigE-1LK-}mr+5#?@u0-O*7x`jy4 zS2FZWc~vadiNR!b99RZE>iroJbSf~B#P;PmyS;iD&(CA~KeTr4-K-b0-bqm3znxSo z2Y$_4on&$ox-3>DlS=;)$CZ|`l1fwDQ$q&!_iE)&%NTYZB&>MQuz}t+IQnJ-@N7Cg z2i2$$AsC9K$+S4cB)Q3yz1Q%QGTu|1^TT(%zykX;U-Gg;8Pjz?p4-@~tLo-M(HP3z9%9k=#UQl)p$clvg1?6!U>q|{Fr2P~cVwUe z4*r`D*7;$vKz8hIMyC*o`JR3kHZoHPrna?w*vC4q1u%{#H7|8}4nnZ{``XD(1_&QM ze`>VQavv~(CF4XI+YvsDiV9dM%NN<#BiFxc{+lZ>o{mW8d*rh*_QuSm{`%#f{N-Dp zriJWAb&Iu27=Q|e2?sCr)onF6?tFcF)HZr73hKONv=enq6{}1M0Vnho45q}{tMl1w zgxaZX|LQol9*(bwxj9Lvs?|V_LOrV!k7)L|AyzNlfqA16SN){YCSJhrm4yJGP1bkvhxygRB;gB zcYb!(f*)cXx-Ovp)MD5Hfo*PF5eG)QT?bj17Q*Dtt1+;jqVao}w)($xYqGxY7E8r=WQt6D+9eXn?hJhSe!eFI{bS|0XYAl06GP>atZB=OwX9)yx zRMjGHGKt+nn5=*1HSb*OBw*f^w5O;h=a(d^M_hc8kyg)dMMYHKJz 
zVL%o0Ma-%ReOJH3)|CjB5zWr&-pHIRTmEjP|`(_ zE*JSAY)+p%Pw+uZtwi3$cu3EdaiRp{LJ+Dk1OqG<|AYM~(pqsFM0w-~bB%|!2E9<~ zh9O0QcWL1(K(HtF&gZKiCYc*CN=UAkWaIqTU7btfliPeW4o?Clj?wJajFgGxpyHp# ztUA@{af&6K2~DPA?lKnJ8F{gK4pgT(3<$})(9hrEoCkW24kNM6bbzenr0BKCtGg!q zq6hD0B>(UI-2XuZ1I^vvsL>!C2apD;aQTd^bs*rUtB3niM(FJDdfif5=JxSI{8y_P z5+s)VBMVRv+}US+dJ8i(O{w3w2MKdcQo7y@AIo_W;CjDsm@c@m|+uf$@v+&_K3a|&CEc*ezPPp z)(?y|^N=a&BPg&la6E;WSZb7%kys=(PMn`O`Kzqo(#(BD zbyK>-(ev&j{ZZi`Z=Rf7$u~#&{CGWQDA97j$39@6iu#~bTDuOplj1%6teGqpyK6=* zr4Ovl#8*;8qH**egY}zdkTSm*&S}1CMkwQZThE&RMuGl6rC0yi7l*-dGWaF~cndP% zH$;E8pLbfR$SA0~uGe1 zP(o8*c=P8DDRt{>p%8I*e#LS|2U;}Przi^gQ3&=jNlYYY0AUh3+SgsE3M80-U6qLr1`{TDhapfR>r#F3Hu8uj?OgvSegrY=7g3pS_}88obtZ%Z(3IDM@E5tWRkmZnvg;kPx(H2R$3He>tPm{eC6_dPyQsfRIfKeTN>Glqshd3_y6 zn!qsSlpK~<%C3VP3J}R+lwu?zo+2zjlm$r9MN*XL{?QL-WqvP3AY4Z|!`SiY)oohw z*Pde$7asRZWu1!;y{xNSZY9fZ>L2Rc38Q?A_3^kL5~MYtFAh(s3-s5LrbfUODQGtO z;Yy~`wbUr3GhJm{VXG0MqW&;)`z0yHfqLQiTHPq{P7G8r`?!w8F<_W z_x(fCJaj!78z=aK0e!Rn?u4@1+FD3DY#-Js&Dxo(4s9{!)l42|4D>9SR(PV4+=SAw zve?#c(ZSdXM_GJcq($lpko7?PwpP0rPtmXghjW@rA9yGG7GUGd0_v4-w+xQ0R#HJW zi{gv=^#9KlFJ3`!)=cJC%HYrT^j_jSw#Nv&xJ!$*wR_k4r}anD^78U{QsKnO#RoE8 zdM_iPlEakBjziBPq2(Fax0IZZa&gTrjHL`UlbghK8@gzhx=o3*`R}K;%A4nsO?OZC z4(%2g1SPFPN_|fo?v2ls7#(=0h>Sl~L7_u;MhR3j{KU9H7W+xWwB7V{14WTuiKOiB z=~5{cZlB#r#+!|Yr3fght^AzA z>f3Ofn~ZI7F8|C@!oudJFR>*_>9|`K+Yat?<3(baQ{Y}HgG<8Uq3P+^D<%QB z;7hQ1Hx;?T<;|;lX9v(Ejs+*x&RiV&xG5u_HP6fh_Io_|5g7J6;QPPkiENtX`RMsz zS5zApv$AWjymUju;%3J7$o+?6p-Sc>N_MaL)TT`pSOn(} zf&SJG2|}CNA?YLoShxvXh^&JovxH?F%4yonu1$0}8djO{=1h#k9eiZC8jGD38Izqk zHTBoSe^hU*%dn+w_P-=7UJ9jtqoc)+8GduPWwlSC@3jlQ5zeLSz#N{$7%Dpz-Svq6 z_F0N{y%Xu-S31T}jtAtbFoGYNv_CfT>nv#3xn|X(3hFj{V%SU>l>G;!RJyq^lM;kF z9t=><>n-tI$RV!ETgTZb?5j)Xfx95jUi;KWuzhEJ9-mEVZ)$3N_@dW}b*-EMP3y~Q z6DG&BhKW->R>==R*3khi@iJRdzb}rWj}F7=pxLOzT3+P^Y#cAlaLN-AQ3JTK>`upt00G1!tB+yt_q%`1HvZLO8tzwE zwm@$-@>eub(~PJ;?STZt8_9qG;Kv4&WV9(0(S%BkTTbMuTs%>U+aK}M^(Zct2vfCregANFF4IiC z&CRd2KA9)h+Och!cN9MP5o+u)5RB{9(X_i2 
zbj`9yWm2oh^^MtZRH{Z;#gn!clEWuC+`wHO3|hz19Zyshrs%4Ml4>|evH%hFhGgcZ zs+0szSNvC|xYp%YZOd2~zPk3 z8z*-bYqE(YX%jzr-qkC;`!gjXsRvM#ALre<{6M4q48#8pf8?X`WMAmPuRW3f0XGTM zrHhmGg389VwU1jSh@-Spk#$)TeUi!|5DkQFnzAAlX5k%~oPZr0kQJXv2bQJJyVe z<}@juH<^Mi{p-&!PlGRYF2T%Fl`D3K*8$Z_GsPF%L;2&zw*RpGTmgcAap^*a`%wQ) zl>Qhds5E&vvljlQ^)YEIsg>IkGjdWdL~x}={5XgInh!SfSirS6Y_sv|5(v%ZCfMj#q_)qFP*olxnQLeet?GGaaFTzY7mk z)Zb2nspfC@`y6mg;$jUUOx9uS?BfC{h6L-b&FWiw?W`A(7wV?t|BOEr_ChUZ9>3)V zlg2M+@ql>kA2sJ+Ej)5KzIPqj^7v-sl}z+qR{lkkQH)NEjjaFs@Zh3jAK7@QT@vaJc&=*>WJzgNMv5ZFJnZ}w*o3(mL6jU;$Az_L$v4lHcE8}qZ znv8?OaLe?Y3-g!`OB*LZm+WqfNZZ9W~0{Inq+1vPUr zbkdpkRoZY>s<_n=k<$^;`O0|iyU|=v?ALUjh0YfnDC|S{ync zd=TZKRT`fG4+gBv`TA3@4c&&>NU}VUv54?T!cK{ErZP7K4HISfx}}O6Opx2+`5QyR zX>(uF)P^^exi@>9)XtkU+aN7Y$lY!cg|6P&D&$-#ib^| zG>g{L2vtI|N`5cmxq9Mg8&m1Bku} zP2;a`Yley&z4n3jj+-P36EQJ@%y89;*9}m8d$&=4kiYR?{H?z~%k&%Yk z&alz+?xa+t=9g$Mu&?@lP2!3mdoo|o=q+-G_8d}&at2_T4@a^S+|@7;=iA|^n=>mp zpk{H*sRGpxFbOo!Txo+_d!#Y1QcvE}i?BwQ;8N`3dS3U>5?EI5#Xn~73O3m?BO z0arP(1nu%3FNpBE+KBo&zaL=B=wnaP;$X*Rs&VlqQARoTaCI#3cI-XP@8x_t2eFo% zF6A8g3AZf)A5IE%J<AAs^4p*Ea)BR*Tkwg(spzf7jn_yCs|hc;2_r zJdt?D@)uVJ@joittMQS1FaDywQkVB&f%Q`9W~0njUAz9Gc08LWuW@`WDs73-de}0J z3p8X(VPA`Kz`=kkueI-SxlU5$cXvu5XNZc1NX+ucpb^*lH5l6a0ceZPwu|q2xrwU} zf2s8Y9JI~XjLJsP&#(lg=zoj71S&;iG-&G}-jg}V2b&(}d-$Ft^z&Q$ikPpd437mc zNRzF&Q6*uc{hvzCUAE3bYZ#`jM>dsOHE--m58LQgQfSfD*@~WoZ|vn2svegk(v^@x z>8!zpuZy4pHbdvyQ@-stvR;i!y0!V^-U8Q|*J-a+n0gW8&Ogv>kGP&u1f6Hj%+7VQ zhRH0B3g@2}>(rf#9N@9@)vL<{vILDZzz z!e21xBXJM3g+}h9;*?Lc9WhHSkxM#6V0v3R+y3Tt3|%}M@&@L8oK0m`x352Adn&<= z-Ul&*fk{oyocqQTa|mzJ9lg5Z()3V%63fI#-k|X`sxRK;d3pZC<;?Bhx-}TIUd(Cf z8f`t%+L!x`X0KtQnbcE8eyAIBEoPPoz8+ictSEe2VcOJ@cqbsyO>)Xp=E0lnwYwJq zm*fGb5iK1N+9y)v5B35262gW#-J;R{TEF};Lf++!##`E({SL(&K9^KmhB+l-cpZ|H zhx(D!FUhbiq9uvBItC=S659iQ8ijkem3XKrccOi%;(RIN;2i-I&B;{7C3QfP-&zK? 
zp06TSz$8owsZEwZ&4UUK<%t2u#hvqH&)G^q$;tVw8O!*5XYMEjSm&p_+rg{A!ULb& zzaObyDijRYFMr?;o3^~zr8hk28GBB%HVvVvd5<;XqqV)S&g&(PSs�>kM@0xvP+b zrEHWF?#%664#6&m_br9?KYyEUDp3anE@scyFaZ;ZOT;yn2pX40GP$B_J2|WXs7-ewO zfo4M1@7QZ7qMa3piysq$ zs>NZrl&tqmZ#LfMtJ-kiMNNr4{CZsp9&?^goHx@J;0u`o5U$TUIINlBoM(_Gf7HZ~l$ znQmhS2k)kl*^i;h(kx*1=t(P8Fy-sDJ+`vY1e1`J@8Ek+dULYMzI|w{CR6kI%-AxO8&7` zwb^FXnz`LF*N!5c^M`SAC-0OT@(LR5zS5= z=fJkZWN2Xuq9*{Pmam*<5YZH0#(dnE9d2C)ktuVqw8?y8*i?o-f!rMufxElb>0=qp*VQ9lD1 zft0VXMQQ5BiKLUeM2gmAJm^YsK3rUZ|xEE#XV=h7<-Y*Qiql~gHh>5un2zphoSzVDN2TN8Oj`ibR!_g}wnO6~bu9n?|%V8iG=Y7hZTf*TVcw0cDPQic?rz)5=_nGdLx)tf zLGJsx%*;%YhYuqwD#&>QySr4D#&&k}3{7|SH?%ulN*Be!NHUgq#op=g@Dhz_=1K3- zj)fb&ov3xDCasREW9Kz9 z>*J;HQ@3AO;jc1r!8+N4;vRrYVt(P{F=55Y>XI)I9XWeT7gfQUNQip29p!5WqHY?d zNayngBt@wrgh;h-2~E#i&A4AFchnz5zk=-Obv#JJU=#n4P#`c7DwrMFa>SUv47FEB zm`oMgBhpVeqtAaciw?`Y-!dThlPQFK=J${5yC}<7{o*IwFYP{?=MVC`XCXuL&B&Xm z@tq!F%EG|IzZ-sKCcbLIPa}uqeZoQBk~1Fn!^0!)iAu9C3HIQk_(&|ZPP8>+a`k!h zQ2LJw?mUuSQjy4f#51cZk*#E+%0FD`pRtHi?_t_zL|}!={Ia!^noLuLjJEVy8kfT| zRsvU7ncXIs5_Ji9?5ynviUuJxF$gmJS@ z!83*Z7)*jrpyN!yaj&?&X4IrI0tk3M#;C8{rdqBMnh!YT4X-R1=JPIMzmBA8a8;U0 zuTDQ?JLI0Z55uc%!%s!`KfnXl%}iaF`-B=bFTv$bY0)4UXI!!}QXJXgUu6zhRDcnu zY3?dKgc=>!#MjcIIv!?>rT#Q&DThE|=81tm*}hv<=??mn3$)AM zhM}#+v#A~?^bJtBWtM!O*r+wJWfB@}d+jlt%e7!uoMgK4$?f+-F!e=;pj*Q%Y0DO9)*PoHV!nlkNTvYuzE z2g9a7-o#{FPqR>>_NKQEQ>jHc!@3hy_b{P<0K*T$gT?IE9O|-uWtBrdNz6`LINSR#kd1uV8MLTrUFn1l z(}4COY1y$ooa}^q_3ImD{Cpb%n_)7Keo_H|jX<-9yYhVkOZ^YL2$XAV^9332hD-Br zE*=erC5>AiVO*fT>t9f{Ic1H^Xms`p9D=Zpqonj@P_Au?kD);KcGmZo160_Bc*+4@io^;g=_5}_& z3C%+v2anoDts0kNxJ|~!>Q|)~*MA4pZnUpcvW+{^zl@2S#dV|Wj;q&qSRJ*ysZ{Sg z<@StiT42U^U|TM{rcN3&EB88IBpLf8Zz?}+8Zw{7h^W8~n+~6XMog-BMLr?T3in;wY0HX&?r1Lk;p=?=XB^PP zD*2Z?y1c1vDMfzNrai}*RX$hFO0KuMX!;%&cJar>iR72w?`A>4_q`=HLI?1QuU^s2 z@9^la5^4zc>u(+Id*>7tU1Kh{OS$;~Y;8ptFr4;AsY0u9v(_>7V`0HzT_lz~eFzM5 zmhh7Yf52aueMCkLyrWJjr<%C+G{(E;8KNx2!KW;x(S})YMc8&Q1D?Y`k%#0I=5+Hh 
zlg!$&dbr;N=MO$m4LJF1+2$Dkut9s}wjXe{R19ztRq*9L75Q)M`kMq}%^}uI&>meeg{(u8VxNomM==`t!`HDy%rx)mX@#~NO z;}qqOyQZ~Y(KVqZvQwItWCfN8q(zzriq{+nZQk!Ex7hzseZ$c>n9#(@5sTG{=*kW; z7|mkG;oo#!hRH0O?H7 zQcBRhMoeU`Kh+!6&;-lh=bVFx4kk2btQ3@5wqV%~wUR5vd!xpjVq=MY&52=Ibvv0E z`)3QO@f^9K39%*ijv7{<@?sobT&KwvEcQ;`7PLW>#q7kfXMluR>W^HHe;cJQ4C@5n z1SACqUJB_RK;q6qih*{k^318wR^7sp@2~gWogy+hkCqmOk#zlSJTJ1r{$Zq9Aq>m2!@^%?h;zeA(;yp~1>wwlsO9QI4 z1?K@N?n+a#&G93Q4gnLopaH^UvfC3=MiRhuaSFNQ9N2LU+GDzUnZ6=?-2FJg(;m-& z_vt&L6;JV>82me0qG(+G=Z|n43TxdSZ(mPB65U?PC1JH$c5$m^+zpUdyaWc>B?Zpj zmh1pH$UFFQGFz8GDD-?dt`&r&VXGoNxuZXJo3<%qwjhnv)a(YFE1<|0fW`})o5;mn zX2P5Y@F_`Hm(Of0+qvA?9go;d%6?7%w&$o$r2MJzhebc5rstO&ja3&#f1x^)n{#t6I zcn=_Bp+Sc)8aXEY^|M@g!?j`Me4ZYqeg@S0=l`+dRv5?rq=4t-y3nWr$`*N`@A*3b z3^o5AeiOZ^f!3EX!P4omC-HvD-QNf|edIBH+KCUWhc(uhXYh1Dl;=ap2O(iNSDE$Qc)HpU1ay3_Ksx#{Z1!PD$z z16|?eB>O`)X`5`MFCB=nFA-j~3!+`lp^`Pq@)TWizjhk>PhaUd-r#?U>{%UImXG;n z3~O0ec13M;rjfmfKA3Pc4LlSa%NWFwsEdbdIqFbNg4S`3)7??8e2!>g{*fgE-!Fa) z`_}`CZl7H*Tl zH8+kGL=EErbNC$7%<+vNzY}<-~KL_atRIuX;Q}{+}}mf1GSI4ycfC_FP{o)oWb1@K-=T-!2=FW_nSMzHOF2 zwI1%%&RE!MJb^us3T~(K?aT8pL2Z5~Fgc8py~fluJ#juZE#-f3>VA>N=mnK+5Sx zp%Km}^%+O&67qN{lJSMA=WijCxJNA4(Ba1JuQFC><1GR&Za9gA2BLU=v`(C0xs~xd z^Y^Q6IFk7_PS)vxYRzLt9AmKHAKB|VbthW|6_rq_E63*`>sKJ3IU>KZ@eyzAgE_AW?MgUnyI>r~4yn!8OU5HKh`=fh~zJRkBY?Pn)Y$>`Fi% zhFetY;5SVk5~wZ+@%o`eo{yJ7rl<4E=m4MoNPo3YDZ+#v{e0Z91CVe*pz8^X;+?MD zFXS7In}staoPWxz6OsRIMxDAP09*d}mnB$$8-RF$fCAUQjLVgEL0i)@Q9zY=&7-A(n@)c%UeGd%jrE+ z=1jI{FA+8F=jR_@2Gb3^%zC@pR=`Li!a_tz6KMmF&limL+Z<`rz8emF` z!Tdjxb-gjxYZStpL9D*}7Ee&oj zpq*EQC371Sy4*O0%N#C$A1^HJobSZfwk|n+BC$g{FK&{v**2M=1;3sF8?wLEZ*#nLdoTRVeoykWK za+*NqV)()q|5Ars=Uy1|yr0^xbPk_3BcNc7KuzW*u(*F*bQQGDb^>Cmai$ykxZBD3 zi7TABWjz2%2o4b6;a3(Wc`7oxQeUQ3!xaZnp-1o6eNP!K2+X{WA7)5=EfoZp>Mr_F zdf?rBy484~^RxFkAR%G|jH!$72D$^PB|p`?hy0bfQ=J7sZ~r*}e}~)Lz?gAwd|E2x zB7BilwnvHfr>^m*sXuYg48Hq~VL*1~Tqt!VyZJmI-t}a!cc<{?OZ07%Nt|!XG9ZP7 zOf?)d=oZ2kwI@tq2a%(b_+GNKRE`{&VpqD9`TgXr@WoN;Wkb<^V+F 
zI07sM(9vZI!|J#+W2XXE+ug=Dd6X3x1jYLdOT-mi0S`i|qDb_dL{&37(r3+?X$yVi z;ydN-+fn7WBm!UQ=HcK3D3ON3DCp;nkuapw@}tydxlndJ-)n4Dmhx?K!VRMCq$$-5 zj?P^J+UF4uarF3!H;HM_V`^zdFwnMqA%E459j!x@jbRM4up|dF1w3#KA-OTE&wceA zrAqez=sgo?Ig>hP%wdy>)Q_LT#o*R;>PAx|yrFHd?QO?zP=mD&XN&J4>EKGP5W5xe z>&ig06S>#;EBRvzzHjF8!sm+N_A6*^c?BdT{#XLUKHB~ZpMV%sz!4+|YOwn!5B8eQ z8SKiM{$DqxO4(JIdQxH&7hGGDjr;KI;P)6P56x80RG5#b(HW}odjHsWOF8lftmT}< zY0`H8eRi*YG|sPOYx)B=V8))vs;R{oVG^V6)XtTM_YnL{k2V-^4!SH+#mk~3SWfT% z!lH%!PGxYsVbdI$tf7;3^zLf8d}XiHCwgyA!xD?N+6sGcCB4uI;7QY`H0TRo_D*h6 zVY%#v5Z6sbHe`uruS(s3JoU6)0OY7iO*ZVVGKWw60^;R=gW*e}(v6LnNo;)cMUZFI zB#?8eRzOXB1vmbJQ~?ZF|2;PA>)1+qw+TT+FKg6h!T4@$^*mis+WcuM^|LrkPsS{ruwN9)R5 z8QjL??fijmM}0v6aDh!Sz7L4I+SmE{rad74C;!2DHjqN^UcEA6{a+Gj|7*>AqKPRp z@vM+zs;!G5nJ9EEihXdgvA)e@(HT@!6-8q9BUs+}V<3@vc+QlT z$JPQIhZa5e>&L(R9N@Gz=0lGjnWxKYIXRay!*>f{dyc#+XwlCLxuz!1tIp%E5<*re zW@}kab@a!agH|3(F-u5g1DuRRow6;3xdTZ~?e$^^z?mtG*_YhK`!l8LJbJ*WY*|lYlP&mer8E-1>(KNo=Pe~^1c0q~*a{jsKONbkAR}Yb zYQy5wXugH1t7%}?!*Bm&2Wk-1Vn3V!lx}_!0Ikc`4xRD_Uz^Bx3P$g~^!uc9>CO+k z#R%9#^fvKrwjU$wWXIX!sw~W3G)&wZ9}h1K1uFBfW4clW2wh$Ju0!Zx`{0pYrwGry zU%LxorwyCCIY^m|ZOBle-iDGnaA9SYRZ%rw?TC0nV^dAV(ESrRuZtZXnp9tyc6-7< z&&bk|W-f8DV27)fUty%)l<7wDY#r)`a>>_7gaOUgxpn= z-hjW!cs&-r^fVIpyV-@m)oCM|Yv8^SWN2qN8uuP&atk9$*oCv7#uDvyokqLynwG6UgWCP{~j8A|M zDUbEM_-yJS5b^L2K-Am%F9Ly|Kd6$w93Z%Y_VL>-(tkm8(N8gxWccv=+oj@ItISW~ z%*x|lL_Z3f>rtDU>zeW~DC*ZuC)iY(Ny_cb8>91ZK@s#%tiAAdUdck%4=BGiuvS6z z5}zKhI9A|FnIR}rtl*32Js?e!Zdp@Rh836|GC@_r6GwFBRo-NyS=MQj{*>s?59A;M zO30lcOnteTyv4q@?DEJYeD}C?No9(?tQ>eXOl86aO)sY%s3t3*{5YA6#n`QAk!8!+ zMl?*^rzJ$oTK*M1_LODsg}cxdNx`I+-~3D5cQG-->a+OQ=SgUxBjBF9SSZ?CkgFBq zchX8P*jZsD+AO6~!rZ1Sd4s?ITX;ZqX*!o3(@WuncQ$dYkBOK=4W$LcTu`Q+UuW+& zz+jm1>9?3F5ks$4NIVK_6!+ni1_(gQtelP>K51S0!Zz>4G3(ui zue51GE3bE&eYg6OrHWF#^TLvw%YQ;-e4HIf!AeM|0Noo-N*{PkAP(|mXE@}z1H_o5 zCb+KBb<2d}nymnS8c}ceM^6ANbw3c0l*f2J%QJ;K*CxC(NGp(};)}?a1R}qSF3l=i?eTYyP*m|u)EGD#4z+eY<*Yhs zTHQg_&2M#At*ZVyBqB)uqk;zE#h!VHCYiJ*A2t!etM60q`A(++K052{P|0^fuqY_F 
z1}DlngEfECSnJXj|2?$<<**PjR~lPjbN^^IJ7^0R|5@^3ivXA1P@GAh`4gae=2`m0 zfV_ttQB_rc#@47j9bb1_F+z8N9Sh-lz$c0%ykeaps(mz&h1q<4f&lJ%fsRFLE zBlpb9ipfzY$pKM7#{I8wnAx`!DnjYW`aZUvpulTy?bp{7O3InF!q|HSX}|b*MtNEX z1Q5thTihMzFY|lLVzCdijp-Q!C2w%@YF1dL_n3LaJ)00l@kjmo<>4vfZhnCUKPK<3^xpW3(S-mn#~h>p*#>cOsa!-}dP- zKjn0;ZfLv&y}mE|=Zl(BPJ=L6@joLXx$lQ-+JUaM(=glt)xLVWt&-8Xqc3cm)M!BS z>-}}4UJt~hmdimoKPF9t1e&Mu zaFe#=jlS!e-YBb2fTpD2Y0b-b`8J$8kR+2rW#j#%V`d&njshMx)rPqkvbOZwA(FB; zO{@tkhTnaY%1X&svnD-UAr3k{UoKcFD3F!lV_`~eOD9Bfh>**Kbwzu9*q5^I6an2w zyK$?|8t8AIX5~cT34%&BMyU~41qCc6Dz`)p!z303mF-$|0W}Dhz1G5jg_UX0K;i}B zY^v#a9Ejb+BPBlsXap{n9gbkC-mZKr2lV@I^dWi{fwNwT)SevAkk1Sv(vp` zXHIIfimnWk>^EdMmq6yvmaPbEQkWXZ!N={G3JG-WWMqJ??sZA!b6Yv%VwH({;_ zn23H7C`Ga1as&kR?UkgcdM5c%ue^RL0 z{y~mnl`s-W&O1KMX5)n+&%U3#b*Rj$MLKuX#O136ka{y#35jrZ0(^oHbm+K-1)y^z=>d1nHmL#GKW65Wd$QY3$1*jo>TnrQ&Wx#70f=dM5=w)P8- zDItfMdz?P&*O3+vE5b^B6v>%Pr&&n-&%Yg?Z8uEu_&3={@!I!wiVTm&#F>7)3xf~rH^p291 z7}C}I5{wsuVE$%$LSk=V`$_fm zW?YO55llvdo!>D}&`JY8ZW|6+pweEqX>@^=%`4X+q&4DqY13fVUA1yu?1HBs~*XYgW}d@+9D;qEhRmUBR#Co-P^dg@k}b#e*$Y z3CyAz3%*yOGWM6zY@PJNcNA}LH^u=0W4@Dh-mhOTx-)wPmUC@euXi8FqVnH-&Hm#= znYw;zNcPXz^NSP^FMbjoKXLc=CN0q>qgtN>r&0qpPXLgtefW%c7@g&IQ=YQbzJH0j z>pZjUI2DWi{0N!jIcDxQRu&7f|Kzf0@?v=9AeD3zgL%z269#%J;=;AOsXPk;*!-6* zt8P&*99ZIG&SJwbi#Qk=naS_9HFAwS%@Se?5*ZyZ+-;J2GwT9W5}@3d^eZz8D*cI1 zGD%>%f6sJCRzUWjPRs_d92Zx z7$aMFimF|=32D1EHNOtS)V>2`NHD|DjezS3*b6+Z)X+?jBPo{^_Q+&`K~>@1QF+G)i-wE`Z2K+F{|0Kt1>HO%CnxU`L;g7abqGKP_j4w{Bn*5E=r@m72NPGe zUIU!^_FyQnogNhr>82X=m8* zxFDfstnTyGp7ye0=&<@iejR$(YI{``FeaSevY>k@ejk-87vg-G2X0-l##8=6RZk_U zdsiy*z+UMLZ;~jVW^q}vR=FL`d=JG)*MG$?Gbc@xapbSd0S+5v|I?;jmM^SF%^xY-J z#eTma?}|HF`25P_P4~OO4);G+KqTS)KL@N2TOMEyVT?;20QMG(q+I}=r@2-+owqqN zatIkD1jC_nw?cqp<;Yk((ugWuiu-%k3$$=^;`TOd+?G#ibKh5pOqo2urFSGnAqy5G zW#xp#GVt!JqXrT|H38?4}KHNFU zh#RUtqNBIYCZGNz7T?xxfnM{Gh2+<*1>7x%a_kwuoSvB^xIQ-})7j?sK~g%D1Vdy5 zWvIpjfvH+{j5>+NTgbi%(uFZ;7#@Aw-CNbYZv{X1^^AIRW#RpP&u?Y3wFN;1R)aCJ z@wjRaz$6Tw380k*rhKtl&@H4{;!pNMHT3kRmNB1#?GVmVUnk@L<_8(+-Z0@cC2 
z!xVudYQ>>p3HLhBU0?Y^h>a81VVk-gd_S7)h2P1NY_b>a8(6)mHqsB~vc@ANo&O14_)hs>O(hQoKycDH zh0*pZaJpQd?YZRC$4atwjxfw};oG>vK@FO{m$@elAtLXD*ebhtf?N|%F5+o>p}V*G zzNeH4ChyfexuyYmg!@eo-+YhfEdvjM+AI86<$P?+V6(@2MZ1})(GgJ|Nnrz_v zbl+>u_#%hNM2Y{ag@*R4)DF%NS)NE)I@&n=`gmyAAoZkE4$DA zO*mE!IrUMr{DZKcIXdD0slVj^T;2b}2>0q;57v@=9+3HJWu_XO=w0__%;R8VRc#1O z15uyaLyc3P;E}U+b?zZF;pP4%rJHGD*S2U=eJj?xCpGo=^WtCeUw@^MYn={}IhIdy z2iC=gADoHpJv|JCezwBXVasi5iV-^I!67p1rn$MM#3Up#E;&rh5`Qlq!22fl_K)!J zOnh8^(Kwe7lRa;nV%Qtq8dA9RENXPa5AxQHZ}yZuR<{B*lt`J$h-^xnGCm=4hi}%b z^0~NnJ7EXx-1CbnT*Di6gE!VOp!zI~2sSAdwzX}`lCbX|W|772Me`D#>e}m#79t~%U2Dye zB}S9vdVeHCDeZG4AC@WS`+mNYlEq(@_s(g2D#!%98s~^2uIE2jBb%* zOddXsYo~l>d91y&(gwByyRLj>CK0odsc>?b<}|aO4j)+sFHesvp>H{ntoU-k*mrcrL~5#r@=)iPVTaLhunT`4{EXusHx}+jI*1-RFISoV z_C4$*=eXIuCGiwX5_aAv4v@B32yePZs$Eouz(iH@7EM@IyoyuQD_u_?N}eB{o*@$oNR)Q79^G z##LQ@=GtzC@hMZD`c}fRtGfltp3PoQ{_6)3RJ|UQ1>=Wwx9r($QS5eaO`!m9I6DVN ze(zkYSWSI>j^<%@ZD;s~H{x;y#aB)JU3#SL&JwBkX0g^%?E!_j$-gzZ#>wt5n!3ET zJV=MGI0Vb~x-IvZ_}+bb6&RIAGzN2F{2>*K^g$K#&;6-~kz|}@ttw-PWAXC%S5_;e zWXyLtSn9T*Y5FV)mQwjn*xB+T&6maB=Sokfl+>G$L8hnfb+{8-F|AA#eztNrxiAo$#ey)HyS9JSdr05 z3mPvO$}~5GRMOU3^ZCl7Rd3CU82#U`+y90Xj2$XoiX;1b#Owo&$3V{Hvi&bxr=JrR z0j4@nq~{@uo<79S5M$3TmYuw^+?y?O&(OpC<+!PmaGa6FRXa_6?XYis02ovbg4>*6 zR`@o)Ibkv59z{Kr#=qs0Fh?qdJbT1jKK~CC(^OIi0t=VQHEwmec3D{MKsh5L>zbNk z@9GX2p3s%9oQ_J$!OMG3^v8kd7eYOxbLTrAjK%}}S>LRP~-4`#r)`!L1Wzwq7RZLTob9xH5`H|3&yb zSm+JV#Am2H^@6w9UnGk0sY@Jg1gq}9ka{V=%IsP1IqPtW;kGhb;P=$d>$(Y`@UxS^ zcXYiJke}~Qob?^X6MtMUG*#ppll;PAqjI1a`$1&+`x!0MLUJLW?{f#%Z)+};g>P_F z;eiEG+1>M0G@ltE8XZ1I?y}bO5?X&oUjE`JzR&_#WE|Behaq!sU3w^wZ?|`M$3%=z z^2lb(E}%E8-zQ3(&cm#yE08wrJ{HRvRBw^dqk4DI(Y!bIe)nI?ZvCM;E3a$x|0xf; zLwV@605_X|LRstLzK^5r`8S9)W|6ivtl}wq6|!9jLhvN+9<7j5Jho-y6s8FN zWvp0?*40_Xj4%FYQ)ZY%G`r!`!U6)LYwbeA7_Xap*kWcGtXvtczTbNKlplvAbRot) z%Kjw^H?ep-wXW0wXj6zbX@Ht2;E);YZSCxIEs3%rA$xW10SJx*O+11m0nth^om&wR zTkrhe2OQ4FW9}4-GPM@%QX-CCgVt{9j9Iob9C221v35LH>(-&*;W})(YXOdFV6G;Q 
zEz^eA;V-G{=KW6GBa$RN!kIDwaEZc${VY~@nc+gVB0Dyv!LyutGLe=ILhr3yigw^N zwX7#Zm)0)#ebxfcV_SCPy`_LG+)?=(M}HrV?2z5Rs-^KZ;ivUz$4SO-szH~jofR9W z4Js4a{UpLA*g-nu0RaTG}_ox_NNmpya$j zH|E2aPGNGFs`~mWI5^mBXda-<89j;)_InOd`Z{s}yQ(N1t9yD~9d1!a z%X(Z4-QNtUQZuE5dwk43%S1@&AnKe9PDmdyf!BF7$Z4+5gs}!e5YdA zj&-+Qeyl%tIw`O`cZ!Slksy8jm9C@1nu+mrlz3#!t_wBp$xGaV7f0yEUyjeH~`m$e#sr{RIeLF$WQvi4fVWczi+yzfUW;4&m3VddX5rOM7jQ2lW+V2p;AuRu?*Kpup4Q#*TPNwf~ z^yfuZ_^*XQ=^RI9YC{D$0tsiT$amGl_G}v0YO!I?emYM`F_D{zc%x;kC8%8B+M z6q?aQM;4?3T+=T?=X;qR5KBtT-7ao1^`I_@u;dsU!WGGd(tl4hQV+*amqvtywmEzQ^;QlWGj+d24J4 z_IZ5R;YTL&y%yJZUw-=IP;YbD|757mt;MD6w-ixE^g)>S!7SBc!=q$;uOAbWkc}3*ouVy#zC)nN zaR|K8OegVQB>1V7*;22(tGiCo>V-+3tk<>YygcN06E6r_vJV3euSpL*Wmn!TpS-~# zRN&r{o;na$CHkRT(>q~Vr6VZ)6&D7gOPV7kr<hgDc6ID5~*BR{%?(ZdY7^Gw`z`Qaj6Y=QR@sT7orGF)s5J zx)A|_#H%B9g7Ss;ltwf(kZMRLb6#1wL;VH(;x-aRcG@bQQzy(o<5vwhcm%7~8*fH! zu36EJ2=MUa`~7;aQrk?RQ-0aQ)mx&$Vlq2!b@X6h(DSIPn*1_m!#!bbzt=J(_5R;l zACuSk9#TsGue*l05Ifn>!z#}auui!b`5LGhJWj5L`(tZ@FHT9BNY7cr$O(3 zNB_w*zLvbHWmst;+-@(TeMN7;X`?RRpd>mNLqXX6G9E<&XQ+TqtFVZH>SGGO z$Y%?lAO{3SnDCDyzv^Id*HFW41-CS z@CSNNKB^*v$!fpBW(E@NdbU}W`&FJzt&`yc+xy=aWXvuNKzX9lTll-t*V?h`zL;9B zm|gdLv3Y)hO}Nv2yW+HW3~JtU^YUsZLXtT-gg4Za}3c3z>osPZXv@|H~_N)bc0ABWaqjoVWwi( ztXxl~D#$s^&c?~G;)-)Fbq`G}gmjd#(|PHFlLiwxTaJeJhif!BhgJ+~o2Lm*D^0N! z{;xU0x_Yp8kJN6?qN96EAI#X%2Y&Y^bEowJolZ9KlhO7)EM){M*QL;d`V3jFK+q>2 zap8Nn&5YIKbhwt8A>+pp%2XSHJv6Q(pRJ+)TD}OyF%&x0MZHoS-%R88v5L#GTC|j? 
z@Vsmy_%om}EGPp^_^##0LKch>67(5twCQhC7^9r8lJGtJ+&~b??YgdDeMcDZ#mr}; z8pOt%u}s?M3F9U|(djjLwttr@2P`@7f(`9)0g9p;nnlBZx{Ut&g5Lk@MdtWW#OL<| z?2mrHtQt6*7Y+f#SO9T;Q*)8#lEZ5TU5JtCGmuJdj)l@$#q|6C0DUZYKa+B$?wk8O=f5mG^+nvp@wrfR^j6$wqkAD(a;BMU00rUeeHJIjw;Uj(p)9fXiX3b;`*O~k_YcDLKM z3Kv3j%z)LR*BzIuNOzbh20%_d$pq$pTzcspHYt$fdEoOJ!clm3y%U6B8AQ&agggCyGv24*n)OM?k>J0lieg}FGL z765L*)+x=T75tk2v6nM(R7c_Nb(#t;_-Mj-TX2w{z*FV4wGqC>2o0#ezRQTj#Y$0oB}$pqAHzN zT33T1ohPxMxN&jI^#W}*^pG;GM}`@q6iOEI^5{HCEO-1(^ciI1Ch4No!e7+&GmM(s zeKIRovOQUupkLb&1|4U4H->}%?GXB3kHYx5A_ruCSFf*-LZpI+N5&)bHw48gTLHi> zWs6o#Bc7YaGoW!RPvm)Wl}>mL61KKhrO%_kZ?PK?2WBXwg~=tg1tDQq!|Bnch1}8E zjz~5ipC(%4Us_-*g<*LVZ$}2LXF^o8&bC99lxie*{I9r&}2H)11EYH(!wMp zU&-C|T(dq;`D}=0B=I=h^qJHXl+MCq9Wg!}F}~*np&$D@G`XIW2Bk#x%P+Y*;iIc!i|d+n8C4pRMQh*2jf@ z!>^(7cmiqt-gnyM0}a;d_=V#z%Ut3tWRfWytrL|!fhA2v`E(?PiNbA$%yGB0^%^GF z0xBR@O`0#z!w+b**81cjBJ*|O7JzmLVu}I;SF9Q(NQ@r9iwgBo-Ep8 zqn!FD5}oK>je0osA(rEv3Dtt z<1Z=#dz!ablr3(@e%K##bYg)*EdNUn2y(E#LRDpKqSJ!yJDVg8pXhtjO?Lu24+{E9S?((fE?;$MXC%z+g&M#i>$0 z82@WBM3muC{cNcH&j5?zaD6UlT}_zpcJ;enQ~zRw>o<87e{G|3Bd=`pHP?zdC8+!* z1tTg<0F4y6AeOP0IYW=AL9CObR+Dk14vODwd^NgPbOG!JGHH>Q^N2C_U z5%C|v7O$*;=FmrEwR{Ww%eRU)W+qwZJIv&z7gJV4IXc4{k6Vwo9`=1h%beRzf0tTa z;CwFA%`L)7;_I_#mk2jw8oDCW8p@Ygh%la-u3h4uHzO$+3OMpP=Nk$XNyo8f2PZ0a z5;TO#i}6RHOaP8KJU;#;dX~Cd-OwPqKj2e$LFjCSe^zv0nM~KJGl+qGrv^qS2mD4Q z|KzI^q-NpaKr6>Pmc?WAe6e7PsAyeCm7ykq}3@qD3A>IXg3R?$TpI^U0k#Pj9cBP^Ua?UHH`||x z3~_C1`Z5A%7<*v(1#3uWW6VVqasZWyT)o_#W!0p1NehO2wgTA5Gvwd^DqLe*oT3xo zMB@RBRM*Cp8M^#EfU?jvDxB#dpUM}ls|~g-8eqrxhYD#uUDfG$YhwOiHS4{4zH!$K zx+Ve5ul?{41$a7#CnxEH*n-|iMoT9$%9%o6z`L>Q&zc|`xo@lBc z1Ji&cY%A86aRXFo@-OkDmqI^emZf{`4>bvNHDrU_FmwTw7b@_dgATOE!u3yRkG_&v z9OqE)CF3R9G;9XFS-wntMvkA^#()&uH9XH7sK|rdG|xIKIA!C?_)%_`>2pj1Oc>@r z$=pIj^I4(|TfG;(FOPFZcUJXPLJZz@R-y4z-*OdO3QWXoXl4EFowkR!)nwVY-uJ1_ zmX1yBOy_|w_N8=Qr>+>%=H(N?L1q;fw$OvBhzHL`h{PF<>ogfkQ<{ZB$I+aj>jZ3J zX6G(*fY`PqYi2P>E_S4j5J1giW$W5MYM$f+{A&Qu=IyD_~=4 zy>gL29q)%+CL^p|;iywjou!qYrG3iK| 
z2m%?N;@7WWmjiYaA_o0Lgs|{_X0=Fuz2BRr!p~p4_$5c5Zgx3k@^4*@|LUR(9?3j! zJ{gp_UwU+fEQy5tU=j8%itz90>4AQiB_Sh|ki_Uk6UBw8k9|?MT77A!q4gcDeiEy` zJ7nW6BJ2_$DMf>r)tqBNcliC`K|XmVOI5L3A?)QnepB@qv0%DSDUgv88NAk&%OaEH zZ`M7Oxmr6j>wms@%PBYF4}X|k)Of7Bwx(F~4K>lfMhMkV;%a+E=V*&ER!IT29w5uG zBKGAjP+$8@{Q#0uQ!2+QclhTGI2md__dY3Yjo-|;KGA)>NU`)Rmf?KSuQkCb^A7%@ zV^g^~+W3ecKlADML0277{b~x^O|G)L`u_c>c7k5fxSZTs!#tP~TJw;kDbwp?hDor> zU8xhW)jC0;K5j0q#?EHwK0dd{0!afK?0oDD_sHQlnXmA);Nj+!i?3QV#2CH(oK_=` zFNDtOtd~)#p8QSP-MnWlz z5WD(G%fV-Evw#B~x-H5Jtg{@dA4BMc-|h@3Xu%RP=iLhgH(YimvUSrX{ro7#*n6#x zgfbwYlH^U8#h#Ji;brrW(>NA)-_d@HaN6O;1W_yTPFdRW+VCyx$(XV*#Scy|jE`XD zTA&q*;C#o-ZVFVX0iUc6;G$dZ3P1pu)%R~A?*`@#;+1n+TJHm6>e(5 zAU9mK<#<0|-cKJ zj`OVov2qFqwd>>^u3yV;Ub4k{9cBw#m;R(L}=KSW44$&MvO~eV6(Acqu8z zJ)0+0;}R-v(HD1!=DK=%BNG$B6Y@`eU_L7X>)mXA4(Wm|Y`#a~6|EZQ zEjJX5L%62_KA+XyYUIkMf7k}6e3p?uCD#=5%G+WH&^S9M$|uvjUxv2HqxuBVt(i)_ z8`zPJZ2L+|kiAY^W>6l!VCRc>`y_xEr`*fJ9=nR7{D*HLWjz!Cw>&#t0$0vGmT<~B z)*Q3A$-1FGAT?bC5pG)S)008H`#hzQ5^l^$x@hWSeOAEb5)_+LLm> zy{`Aj(Rb)1a5*p7EX<9H>j#waJ(r!oeykefY}M@lp49y32fKp*Wk~AKX1tI`NU#h$ zgKuCgoQ#wd+f;XWCPR#sK+K6Ra6dyhA#4EU+CcTO=4W~;(7Qp)kM|AQEq8Y1ZOd4X z#zAm9@ElH5y){UyD#x^CDWBCE(*|5Kp%Xbb8oPIngfP(UQAaqO6r~dSLh3r+T57z5 zZJxeCo+^pS-iPLWXH`glefjm)#D@%iJ+wM>GtZIYZMEnWi& zOrnYTc9@5@_{d)kw-L=Br}CfT${9}_%O}QwU_WFP>EO%YWDIophazfO{&UbTuk9Tr z0hP>gRIi|?@ElQhe$jCGN9Nlaw>a2byycGHkPXgo#>!GADrW0jit)rJj>DeZK0&#- zJ@kTEl-5w;ME4_hxK$MtAQwsEPCFyb&(B|sv-#0RA1D}J;ab;8E1kU|SSig&Y>GT=R)I)RFcAg$2Y~^(mIM}#f zWP5=d;c_6}!#=hynk0V$8+;YG?k-$#z5qLDxwfpAn-f0#`Q+G77vg)l?l}Jez)fdX zML@;m&J6$I{hU7Ezh4*s`SFFe4IFxnj6~}`#Kc!bMu)E>(sjgLoe|2@ely`fe=5X~ zs0WT=%wE?pf{5wv~~;&HX9 z>&aSkuH?SZz}m2uMs zNZ&$NqtC?qgYsvkyg8+#9Tm^+Vq`jyGs#7iREf@@KaMWqu_gzF?w^-%cU?Xlw{9NM zy%TfXe`a`*^L;qaw@IMx;8(3-^N_l8O7&&ghrFe{VYl$Q#C~akIaanm({vZN2pu^N z37zLX7R$r);}{`I5e|HLH!NX8PiVJJ#ZZIHNsv<_Ske`j^D%gPSAi3th3?#@I{a2c zt9*BoS$K#1XgdDZv%mxeQav~BdHqMy4wHrqvv5VA(W+bdLVv|+cB#(W$E)wm%Qc%t z=M#&nzt9{i?If085|gJKJZhyX^Da|xgW_iC+Ty@MiZ)p?`h~cP9hQk@g)$s;m^dim 
zEar-erWJgmC>{#wRkoZ|m9DL((&JTh9X5QDz%c<4w^Ja`#=ebCV2B$qu=x9;bGQfb zji+>s)pO}8K8C1LqPAUZZzVh^VfQdym-A^s;9MR;s|Zd%{D`-v=E=-2q=Hue(of$*1Of-|VBdOzQw+v@AH$78@7 zIqPh|$^H#YtFI{frArtZm4xQ!@{T;vpfO~|ZFOukH}@u{8F z0RL{;jE(1SHYvN@DXbvLvO1M84u^RDF4GyO8L2cX){BnI!*C~wV_t0MFh2W}0p7j` zap4?2or3y041i@+)CXh7vU&u)+X@qcankq~8QMBIy_UWBgJ=f|^hyO=?)i6K zadG{*BeB$i>6Sx|8eSvXfr+9YMB-4L1QmCPOD*k*auddY^3i-vI0sdp8F~ZMAISQ} zPYk}0Zg;w?FgK#TOTcBCsd9*L4Lf+R^L+dcQ!xy^b^}MpbZR|k0XVNbM95K*Ak z0lQ%rt#lsowutu@E6-8k>7@lCL8U?7+^cm~pCLvjsks7IOwB|R!bPH#IgX`MYKFn* zwVu!MyEy?0_uImvo;Eyer^j6;oG%JdFjVYYICW2Ll*Gpu!LNDH^$M{f+sLQJe5XaG z5=(zEuE<7Hk8*|$ws7D-6GCAs*6u6`U-&e{x*$Z-@jZOU!7A8Q|GRT>8 zJD|I46!H8ccU2HRP2oXX_PSr`PizGDU@WD#+=B1jNoI0A=s7i<)Y^Sfm3&8^Um0}= z6C;jGzKiR;A+C4&Lh`SB*mAX}-$LvBDX$`Uz%{~!t(4bxG*E3IsQLR=q?A7h*xhm~ zUWj4(?)ZK@c77$e z2=@?NbLJ?`nZe;)odAY;)>hEDQyrcs@g0xyIg4pK|7HfukkCEqP6h|fBXdJw0C1D# zm%&4xCb4lpnoefP+s|;EO^h}o(t`7-MlTou+$dh<+sC2q>UDOqba4y>3WpxeC>J+X z$aW0Gff|DE?Y?C+VSZEp_i2;@dUaQ|XCPb8uHe4u)0!t>FN7L1D{wO>vP zrs0^p0zQz52FAvv)6hz(f@ew)iaC>aj$YVAiz7+S_^`ZJDOARAo#T`(vcFP5WRM_} zf@`J>zmv^v`tnPZHso{NA; zGH-W3%+yMnS$mV_IpX-h=8px)QLWUZ@X_QJumS$W8N&fOnTdL7b{*Wi0IKVl)z6dx zFk?qfOP|*wuG^wmV~0R76r=qsG0t;>=K%&}!9q;?1zpREDnDaML#Vkwi$*bdbz=`< zwo#x%cLEGLK7YDc_`|Ij->mGAJ}M4y1|KI8H1v=``jZ3Th%&+ccJ3!4b??k#h-p>? 
zm)HN0G<(T)S8xc|cbztOlsiuTR+hNSUB@k1jv!=9u2Qe#OpVe@#i1d$Jl81zD@0T_o3P$rxEL^>z(N+T$JF_x`*3>e)>YEHQzXB%AK zY;#+_AM}3wJ_|P2=1ASbH>%ODuj*zRHt(9lhk&7F(Q~I%eb8r>WHAOfNpwjb6w*2c zWzjs16NUuA6Zw;S)o&AX}0e3wv0OSt^(F3X|u)w z0ZdLLB<4>mmFDP{ne)mAkS5FarjCc7{5ZPWIcf!R!8z=}=lEh!;NIKdVWVGY5i8$L z1n8S3m!CU1Xb@z5RsnrXfgGxUmPf^(u^aux5Yv>MLju(2Fx~KnHF|Q0HQ1+^T2ceR z#C1D%O#Gr|$6FpY-CTA0CM35zQ3o&|c{ z>aTF$SJ9V^J7pyXrp3o>NX5V|H9}jiAWfhq2?(+W0tN~l=hqE@vdhLR7*ufJfl9sp zxJx&TIKS954rVZO&qp;PK$mo+2n;Tfnw=%+pdaUx!5-f8nE3Wy`BlVI9CD(52M5DI zx){9BuIuH<#7?&f+LrDyo?o;riz~#ETb>1Bt2rQ^)65fUg<(K1`E8dVI(F-J*6@zO z@cd&zdcI#{65A_JqU3dF;i5ki-ci5#H`AQJ=?%UwcZT#Bp`TEy=HM^xrT;V5VG0Ae zh@2nmCg1a&08tI(!KHrw18sail1*XMB{JpZ*~lR*oZk;A-W@M?$Cy?~Y~JXDY|nz9 zDO=;zwhD|IcrQJ_dAsz67BZ}V2?J@oD@R5UUFSgN^At; zOM00qe9-Q8Y)Mr}=k<}%PDN|Tk`iQ--ZDD4EbmZ17ohc&yXl)g>ogUs#6VqOr6o720!FemH+i_dPVLaeW$y48c{Md9tQL9#=> zPOn$O{l&79-d#B>uQ#1$K~Z*#0)As@=`6zdrli7ejPZFWRKc6a8pEDv*MHk3OKrD4 zYiikzD~oYOl5mw;CqSq_RmME#PQ4Q#h+hFw=;>e63tk|Rqu0lH8O9p znyJclvyaiWga;TDV!9YN*OO7NRK{kY$$v$8Y?4WdRjY|g$#fPgpRZ=Y+Pm|sLz2KI zJwSF-Ad3e?;3&Fy*?wq}zdnV3+v%SAawd|vuTUNSV)5gZaJZB5;(aQc z@7t0ucSpIh;hXzPbe%4_N~sa01sI@xLGe#*uKFkHIfN!os5DLrdK6+Awb(dJqCy4< za9WBz$sBN76V#?mO;I|KmkCyVLOCCcW&TKV$C)UJ_&Y^qTo55B?GcH$>0hR{;-%!{ zZEl79oc``cX3y`1_wqBtVW6fZUwRirSdp6hy>B4hWDaWR^w)Y&edHRfY9Q=pZ02?M zKDiUvKos%g`b#3_=-z6xIb3cuNzPKp^sDyBREILv zvSxbNLoPjXNsd9TW~peh`M9cDt#n=i%8Z^x_oWXoJm#R12v#=$-X zsMnsOx7D9e@3DZogeOBHXODba?}#pDl3PC2x9+`th{hZf|Mr-jT?lp>5+Yi>$h?m| z2Yu7wilBFOR3T@QF=`SzYnW+?hgcR-snDHs4b zbL9;$q)EFKq28`E@ehNcf9!GwT5wYs@y147Uk5$`gJZg#WUx$>DA+#nn`CSL% z!zRnURJAG3xy?Jc13-FTs{E~b^ubXbp?h4r`jWa8(uYPS@x&%-d&v-x1`S*PltvRp z!;Wgm=#fipq1kC_B0y2=*`v8U;7m=7N%j3Ai-}wkpEoB=As^(A-YD}MryA-%lfsV` zux*lR*{>daqww&`J|h|N`VWLH)=P8*t5*e^z)5fo?9uxqI}!IdJR}yRy#`m|)DnaT z$$qEm|KT#J|2Zo%-xs$A!?c39An~|2ow~e!smGHRgi$7Kt=p9H&tgUU--{K#c{`eU z9|1VSm-J)701-E<&YE4)^h`RaobuDjr68_@R(vxh%S(?Tb(=jfe*?*i9}e6W-LkJDwE?QL`nH zF5~-T{Sl>TgO!GJ0NQ1uI`-*paBMG4%x@m*WLaur_)eS_UIplPkz^Bk-?Q+5P#euO 
ziEl0ixyM9cDwKoAp=YpqG9F)YH3$%p4DyFupRm#b4bOXkds3F)ZJL&ddfQFGL1uhA zHs)Dn1&Kpv(6<%B^YoOd@%6IZf>~J~50bSvmgh!RQ%?&@5g-Lq3!9Q&te-w)c;nVD zdNP;Y9AO=Xs(3%{ZaHOX3Z2i57JdOD%uHpC+f0IBDt#>QL9{%wOW*s5cBO%x3SlbY1Uy|S;7VTOUt9<+3$jZ~i`|*e@I(f8; z{wmX-BJ{3K&f=-tJ{{#X)4TEVvgeXKTPo^B{K z6~pAjP)|PePzw#_NK`qAmeTBw2J{YuQaG4OBhSN#*NS}n9MLkyQDTtXT8 z_4VaX0c4r)&txPiDh62dnGlRCh1wu0?+M#vQI;!+Vk`VDrUvrApcZeBUkev($vBTL z#p54|i6H9xbmrYOsJ!`0vpTES8kcqcb35ph^EH)!?M6fP-GFD=Z;Ei#u6;$#M60?* zjVT4)Ye)iuqE>52Vqx+5h1(KF_hRVOqcc|12`_?$^LrmeB5NFZ{KEI-ZJ;c@y;no)!+;NpWm|>kd9Cd3}MiyqiOu%&4iJC-e{hDX>CF<)eah99vNPl88 zVYrJ~lq>eA7|?BPpPucbcn&zRop@kHogwFVpEoU<^PQJE4gUFT@G}yc|B_H08;e;M zg`OH75QIlfotrkIP?3nhrgajvl?Y2XP#End_e0mw_vLT72pU=*TD*Br3MS~yekF#d z#X8HtV8u}OV+G9}1}THie;pJ!@V>ohIo%@|?$?63mL;jorhjp3tXwOO{j!khO#Kn-M4oit*-7H$9PWnlGqPv4Yl#-v_o2l=*#Rd z9Jryq-Z<5-tBLJuW0gK&hf8+CaF^zc%U1~Yd=48w9`YTLhpEiz`2upV;gadyX}Mpd zzHt97YTNQ&zrvQwMh8rC*m+%sI=C4ku!-)4PoKt$oZ)VN@(6D0ZC#RK{3=cLwlELx z3(Mw3NKUUV$>7^)v2x~06uZU%liYC2+yL(!3@oih`C~t9p~hc?%3u_CH6sVZ6C><)%BT+RqCbBnj9|K{Ms_FnGG+) zj`m6o<%YeCY9XR*ZCY>G5S=F9pOzW$Dk6rV{z;kL9ccBa*#nI!0#iwfB2nbp4wxlI z2UPD(-BvDmo1f6z^c*0paZ`~jcq7y)hEI5*dKyBldaev)<-oI-X{N$N3mz9y^U@?a zb9DyourFUoyXWR=E~qUD(+5d8|MKN`8(549q6B#}tW|$Wv!Hx&#xYg9KqdX)(G>aE1$`DS;k!WfV^K&WeqlR zcY~mq+k=-Ta4>fOJxC-UaeKWMiL(~TAS*2-+v#mTWkZ}T2r>3A9TK@0B=@^PvQpxnhzU2D5yy#)mPoKivkco& z4TU~OU*@OUN>;y`XZJK!D9_Q`ThiQ()*}dL=B&Q6YDkC;+r>oW!HATDJStC zvjn62L!m?A>Rf@-CWL=?6Ea#;^-ZQyOIfpo_@i}O0(&*d0g;{AL}>8udW%1Rj~JTh z(EqTb945i`iA+Vd(wkuR8tn&1OIdm@GI$E67rYPRkZ!?ck!m<4Ga+`s=3CbNDa~n+ zzpK99msafH^SCDhjHd5*9z}?p)eu43!o1ct?w8swVO8401?ZUy``0>J3&l6hM&%^Q ztp@|{i}kGoG3OUTzt_j9NfzoK2hnalY^k1Va%bPvA0A}Q4eDfy`woZ05?`|_#x=C( zH8b3bO0^t9LQ&!z;TFF`9NU65)2iqRR1w=B9pe5b6@~QG=Z)Nbe90h0G4^+i5<}}n z+}~Ko`hLcl5z}Q`nj1(Bt{PXo+AALBw91C5;+)A>PS)*?u)rd5Wxtv?f+3YZ@uRa; z?1S6vtPxxvbIs6@YJMg<2%9|7U+?!w(>zj8-g=i7YeM1lEWD6+11Buu;>Yvjl&ndp z&C8!1S#AWo2A6YmOWbW`wvS2Wxw-mN`S)K%lg0_58i{gnL#oB?qT$pZECHKSe5Pz!tNN0ip$;YEbHCscD 
zH(a06uVVSMZvPlrC(H{3aM)p-EpgpLzXeHV;vR(iN_by>I}~d{xU}{3YN)&1XKCwhG@s2jPzn`GFLM$0=!Fn!fo2$ zS|@J{H-;HED;OZjRW5w1LVS!8uf>dli+53eLd{pOUi!nLrUXfsKTfq*Us?4{(qR8D z%HA?8>b~n5rlh;OMMAopky1iHKtdQ&32Dipq&o#ckd{^kM4F)yBm|_Td&nUMhUWcy zp67KP*L&Z`eLv5`C+6$0`R%pWT6^t}q;qbQU>fgG=^qNMADDmZ2Tlx#>OS%dk~3?- zgCtB7LU=4P6+}Tmqivx-%mSmsN`_DBF#Np+0~Y0hqYHXXios~X`>!o*qLO z-U#r?e?=J9uf(V$`lvE)2rbULwvNskU`$ZoUcSaJldgfgFD~=~W3BQvzFl1S*^J<* z{kgt)5h&1l7{y2hLebomt-9gPG4>O<8U+}~e-|!_AS>hrucY?HF%xxB;}@~u7p$JF z)w9SqVa_mZM!7D0N1HD7rL9o=hP#?V8;a$pOa_<{u`sh5f8ID%9NzEH)Q@$Oy5;LT z5nh4}YF~3a<6;2u9+1y9oiB>;`JDzuSTi8|v1Nj>pyp4bW|9ap@-A6fMzog#37IbS z>{}zv*zsGGL%J;@L>*OHNIMerJ-gF0WEVVtW zcpkcV$ZLQ`yHPKq?UfrPQH|@T`d$-Q$g8EOVJM_O$iXyCN_0s!6@Xg_4kw(^~{d5}vx`*I>c^ynDMZ{sk^A0dT3{i!i^=tXa}buO|-LlcF0-)>cc= z=}U>fN7qcaTuNnzKQ2Oh{6pGDei+NTy)`ov2GpL~uqcS7#^*7@n%VqD9JZj58vXCfl$e ze)1uU_Bh^FxsnzB6i0a2y1G}I69a`Hpa+%qoh-E%ssH6A>D9g z?(kkWT4(;~=)O2(&O}7;`XW>Qfslp!Zv+`$B&nWwTJJZ?7~y%Mi<^G3_0u@iapfwc z)In)}96!uzVE>e75u}Rv(y%zBLzosPJ@!)+u7izwe-BAMUp%7rVNoH8{gO+Nxz&n- zW#r42V#vmv!(~v3 z!&@GvJ@V@bQ6Il%iC-N;=$4Mk?nFg4Hs`||I)uxkHYhoq-i>ua?cQyr_t>Jx{-k<= zhfO(z!F_7#DhxE&v7u(1(E+->PCRj7SoC9SGw9FzQkv3PSOMh~k}zDI<|xZ5OxM&x z`d3~vn!1_HO(pZ-in9d$m#!IXP?ylIlj^=^T(y!Fjk5w3-qklTsM3*x2jfb*&Enmi z9kJS@mEGk`O!J-C9qdwjZ&r`gnQ1@z10B@=!&YoSnTl@W^88?;bR2HMJBYxz*fb*6rt`1ZBp= z*us^LkGa9_@{VQGJ8)D6HfrY}V8fxEX4(x6INJN}2c<>?dtnM~SAo#)CdQIg_i%A- zGX!sF#lRSG+KQ$-KURR^$@a*hum!5JGUZ7)8Nn6c>WoTHXG!+#Z^3|Us~6~Dw^?cL zO$#P2Y$)^RbgkNA&Xvy1C`@)88&FV8MmA3O2I}Y$6ed-x^L&peG^bX%V?DSrj>2(> zr)()iS@fcM4TXKT3XsfoW{SOA(0b5#fu+0T?n@7U6qdAM;#AJ zuq?Ft`Lt39OTLU@KOfbcZB5I&*n9Wld1pbKf**-N9*)&yQnZ_Vc8AlKw@+I_o7A2R z(WKy(69y~@++%jdcPxw4ZN%W8!r0pTT8?kzQ_%Jn@|t)*Mp`VZHoC7*?=k86b_ZF` zbmhf2yvSCA*+IHCEI$q`KN_Kur^gfgQw}xK%+Axu2GPq!3!tDK2M$H&XwSK3QtacA&yuM?C}$J)o- zkgpR(qX=Gua-ZE3BAQZbg0b9=MiGtr7o+=@=LrX^nBHKL8^XpRrTTYrSdem-7;Hnz zcftpX?;q?MGeWJKGds;-;fp;|a*SJ#KD5LYvgBXoTw+!1AY!fp=&Cr0h8nRNGD{!h zsE6m`!aX9Of|5~|Vxg7~<1JM_R#uaM^B9V}S=7w&RFvI^-iyM;@a;F)%SyZ86Sc;U 
zG&OR1)Z>6%r$$A&5IPR8y&|PJS&~JV-Ok(P1=jy-AU(ClfxU_kYk~$wz&Jx&wsQRs zELlR7mY4gaA7<2SgeH9sX)S@2xm=L_!=>;br1v&J3yW{>5P?RWE!_$}Y5%R$0VNjf zc|@|J$9U+6+x*$!ILok^(3eWyKSS$?kPH4&$mSag@{us3s%3eF32iKJS|QGsGuoOr z(!FAW32N<75wr0@;yu?cTw(zC0#-dI?}lCBru(z1`dy{RsXA1l;QVbb3PzDegt(Zv=W1%3n#0Ug721 z5};2KXrHjsa<=%f3V39eK8=KJ7Hu>W^$$))^HocJlJShICyXLe(oz6zo*bl~ohA+3 zTwrFlWv{jQin!eD%iHVxq}3z=u7;~C4jdkS9Te#6htsM>;2q2CQF;k|mxiH7VG<}P zz7MaA#mSnE(kPDXExzxx)E2+7@zRr8#&>-xh3R0{Drm3jOa6cvbS~-zeDDT- zY%e)|I{wI3t#sm>=*TVr_2o@l(e&L<+?PlAk+7U3NM7TkSInH6%(y# zjIl`P5dS<9P<>=?OVR&;yw6fLVPTEV$?NVNR_Idf~YLU{pTT9eq|rOE^z05 zTR;Yoo{-1jvr`W!zngqRL-U-HilzQf3sYKOYzwoWN60CB{n44 zW2Y0L7w?c+`Ke}R$i}ZVy!Sb}{zg0;VG!BszJdtadU4$ZOfU$#rNzrhD@+JU=+vM) zGqFta){asaSpS<2{ilR8AKf*^SIUhi$5&BGV12Qf-y?_J^7mk`#G&p2XP+vJLcu&d zJmshSv+p$ikQ+>-CMSM~%l#a;mc7G9yqzk8qb#)VjFt&gwRmT-F9~0^CZfX)=ZWPY z?YTKkQ%hIN3;n&;na*jW&PFw23thi#F$XEuWiI#WO)m(|9PfF5;g=)!cO$=4dTo%R zZrF6mF|#;I=$O*#hEc3bJM2Le#6NVAO?jlGG_6Jj>k31kwI-;(sG!v^wK{1KqRw2t zd1mO!Eq7175NdZ@LbH-!RB#W58!3l-kIITZ{AaO08iZ%c>K^hJ$5zR|=7akEhiuj7R`RKgo7;1ikv3pmP9+6UdutV~~yhEp|) zb;)nQe}BVc`W<@#BE;O-#h2~fRjCT~wxhe4`qnl-bX*Mj9o|$S`6#{fO*IZc-F~sr zV%iE6!AkTZwH=*Xs{jw2ecH2dEex^jkJEh&l3G_&kz({n>&g;-=S{B$#{)2EDM`7*`%{{4HpV*!5tf`?V|u_lcuBZl6(&mw!p_K{YogPaV7 zds}6i^C+;~X@>;Z`}PX)eTG`r8lA|hCb)N>Ixq|an)z$_^;22H)mWS55E}K_KISW8 zg-uzPc9V9y=OJK7#oGfm8E`2vr1xg^BziZaVTst_5ALq99etG0+H_Kt?mQfGv zOLm1H-7$WjO1n~1O9lMonH5WH3~MMjL+TE!wNV705&`;I^#pIM0)LX0S@(8n&OqLk zo#NGpe|0f0DU{ySNsZuQPNZ|K%K?#~JdQd2hup(S%hW87eP_^CB8N(vg!J`P?2Z1u2uVCaU52sRas*P}9>y^;Pv=P|Q*20_pU6yop5x_v3g2%k*J$`Ecohfsm z(oNR>Pz3lBIZMRTxf8FgQ5c*37HEwJL!- z5*Xa44RuyJvn4v~C+|V$IHqu#hEKN?MH=&hk+c57&BNFYgS|(D60z~5`Vj4=R;)}s z;&FE%GAyrsL-0H^*ni8J&<}?WhB;b^;}itnL==_*Y;X_a$+~s`;bB-%;7tFI4Ud*6X{o zEyD9U8b&x0NB9-O7&_mY3~MG&#~IJWHNqLJ*)HP+xLqEQ(9@7K3a1ckS7#J44#34? 
z&o6&GINEC4`#Ko!>UyIQx`@e#2SF{g{Hi|lw<$N*%;o#jUoap1yT5$6XjMG#L|N4) zbROiZw`q^IJR5$U;3{N=Fdfy@Z&*l-OKh=2v-aMc`x#6=_>So3M$Q-8u_2Wj-I0R1 zs{FLrPNqpdh=Utvs7U=X<6eOY$1AH`bU~vNkIAZ(*%xL?QVku3O$NQCVT?@2Lf?j? z%b$O*CnMQAz*RGIh==UDl~asPQ19tgP?xAT`Clp!&b;6gPu5f;#q$rRQhC&jcP_GL zXf6Xk-QIc&63lrAQY}RHkI?R~yjYN0skGSm3;vq@fxjt)kh@KDgthN^i>#|E$7WB! zSW>dZN`r+l)T*ItY_= zC7-U%V|soXGC5ED;Aij>zT5q~)pMNXuPwc&0*i|IN1nE};cvWnxuy)(`|#bPC3fS8zJ4U( zL%FQelm&sdZv4oY!hgmPLb5Z~w+|0HddUPQBhx}}RLJkFeZ_9eH$Btv}6ntu59 zo0uzk)`LK8BqoTSFcA$J+6bqx*GROFJtutXe$ba6Goq_gmdW>E3`(*0WfiH947mYf za+YWTK$4CH!+B1QJZSMLw%SL1-+dz3BmNCMlKy8pTH*eu8ulALU$g?@ezMaPqGR0KBU#1GEDkoG9YQO(goc`1}gp3lh3T+!o2_+xCCtKyk4 z6Y}=J=P@>8CO`JF_YCd9uMRu0mou7FF#Qwz`qBb!X0Z3jsm>RSQpJnan?qe>=nJbP zC*9j;2Q2M<5jZZ8S8)GDAly>hpefepY98yo-Gk~G7uM9Q zb%l^h-}2@>UOyW^#+d9BG`tY#$}v%?UsMh(j63sWDSl3qyGOo-yj&A^SL9o5dyaGj z8{)w9JakQJHmL^}E(7|zom*omVr?j&)C{XggkEUSmj>*s+|EnRyM_Jf6-*NOC}-Cf z`#1&>wNXrd-Bh(RkqHOs2Lqp|CIC8yTtY2tv@0$n9$VX zZs#w(jp_X#uz{L?Ll`90)+vqMpgFQFjt&Jo*kOSC=w*#&w?_enbRX2Q-J`;r*yKpO zc7bJQNItaRz-u<#A}3o;@<|Nb?a>nXv?x^f8-}`5ZV?@Ab1J`5#wOl;)SNBlrg2GUFm_1~ zA6z{jAPm^9{j>%O55t0N>v@MN$+WIdyDX!j-65Z-fDG>MCS@@s8)D70O?Z zQ4d!9{}sv&W~PVt-Rc+Dm%hGBfh4Sq#3ZW}aAq;>^tw5Fj%6aqilDDBK9A zWvm1Ven<>^4_isP%>>R2T?IE_KrYmasYZ8Cqi@5lpaf5|b86oXql{l~xC>tH&9v@v#q1Rj8xDt=P@ z^e%`{9%W}MOUchfanK$E%B_v^Lp)Ie-@UV&~zT0natvkS&=k$k0MFBePaV$5)G4)0kesp<%$T=TC-s(uQ9lf#( zKI4jvVvvxFf6R$d+KrbXw6UILna;WkFC&VLTBWRCDUf~6ifFh4N7a?&we}^Nej$GO z+G+B*MbMu(#up>~*@sR+{ubn?C6cMT>p>LBCZgrYggHFnl9CTDev>tQjm~lsMlwGb zNYE!~tR$!m9!yf74)X7|TLxFzH+@Oz!l+TBB$fqfFI_E`XX=4rtQ<^8um>X$LK8Y}Mv^Ge^P$~VodArT~ zI1L1S6$%z4Cu2!XK}*#${ES-G?_=uAy8^5XVr`u?8E2=r3&ospdHC1dBr9E?%rA3$ zPp9g;0XF(Y!)*HJ{YcP)RlQ4X?myJp<<#Y+k@{b}Y<1d)k*ga-FbhDwhQYBhU~;V4 z!ppz$=j%>Hu$!?bUNW3KDWyQKz^6Xr1p`bF@6VcvorW^P#wsAczSQ`O8vM z!-dR|oBJ~xDIsgVUx6dDeHMdb$Gw*B1Tue88x7(gD(EMZj52j{)Vy?BTQ+k0l7usJ z0!7pPZ*tv?PuM=0T0$*M&|A?f&I~;}UMqn^yC3AbVfA6o5_uuG*|qntC817j<&GFM$1N3x*K*cqMpwjZ(an&AnZ%Bw^1MSYFjx 
zl0+*nkuRd06B6;gx#qa-ye{ce!>L9|W+<{<-cvySvVVZrTLFJFl87 zR(QdYtuiykEXj=)WI?NoTiA#izNM>^ghIjc6Oza2T2zULl-G2zj`tmo#J{di2fMbFFmRcnUOlcown;HMDwn|8iQH8j8lFp03pn$PJ{_}h-Qq{+}O-)Zm4`%^4X}nNBd7VZenx^~Enj{YNnl zoTN&X$=_E!3F7pLYz+jnny{L`;FENz8N$|I)&x1cV${@k_GF+^q2Oma=6YzEu~G&8 zqM6=OFZA-vyD`D!bg(+30yV@7wpq;B?atl?2vZTsNmk&0D*zpu$%|W!hion=7o&pS zxcK(k4XYb!QNPRl@={Opo?hbX%rCD-UsG342-I99$T`l2gJ<~^I4~_R&qS8*v#PlH zSPF(&oy-F=BkY~uV!wY8$0>%6 zIW2MhH0c%VlGnv%=XIFmXWPTYz+SEZl)MaK+c87Za&Beb#zkJxExJ1t3wvK;z6S57 zuw?G+p`3&w$D(Fy``~uy(pINUl6-v4c6`RGwL*@CtbIJ-} z$IfzyC9$i}0?&z|8|6bbTRnwmM|g8YUs!+@P#_Xo{^a~uArMKb{H+nq#{<3hIZcQe zPTddYUy}mQ#CQ1KOSNQF-Vw~L{B+&A4PA!v)e*Wvm}A`Dt~ft@tT;8AB!g5=)Sura zwi;OCTGkXMRx@(UcUA((ax?3IzTna?dEiL~GT<$Pqdqw?&ApS0rQ_g=0&r{Rw zGJjN@XHhvoMUbPhv)Te-iro@#VI}5IWj}U-qnHd;JFQvY`avx|zhT>dPUUDb5XB0z)N!XcyPV;t_fx zZhM;5OjpWeyemESnHNtBru(gGiZPeK@=16gmyl?){c1#J=4P9QWqmtI;SIuafnPC*Q1QA)e&+HML!z*(z(vL4W+Vag3UK0U z#HGg-am_E#xQr2Ssh`*LV)1w@AmDjBP!=2897zg8eRemXn9Ho_?vSbq_M31ZM|Cb7 zrz(g(+hsPM9~Hk+{-VKp+_g{W*q=m=`C0X;0#0t54MT3TGaqA8%V>64+z3u6gRmB1$WGdCs?;PrzNPCu~ zGD669Zi$&o0*<98zAWK#Wj3$B!)Sea1z7Q!-jRtly~6Pd$;nn5K(fcXy8UZ5^&$yt z?si&uW%RY4;qUT+01wTdO4)s}yKBJ&jH^85-A`pVPj;waa7U+Asq|XW@uI!;n0n$nD)eXC#J&o3r90a4m zuIZm#pf#XfD+)BNlOMPyDs0k%Z+j?`j!1Zs9OO);Mp}H{IG2SMCd3glbCawZ6|rh! z;l1TDzojd_j=Wj=DmV8%pd~aGZuCguo*w_(#u9cofs!EycUozF?{0Ou_Il-bS)D9x zo~e>^A&Jg&BmTU5YXsr~@4ELOg z5Hg8|Zm}S0l zcTf5=-^w@rxA`{X`Nj!-K9C`?t@rx)({M=cYG>+UYhTptV+_qPsrRDN4=1x`n04vS z$dN>iRNuNmRiozi87U;?ncO8~d5$9SCLbMatXgAv=+pQqx;@1Qn_!yOeijd1?QE{g z>V_>M$g?9;f3YEmxp19SL`Ksp{BN3=$tM*aMG$k@L4cXwUdKx+xK?*J{}-3re3)!) 
ze-Ye25dB=v#g~dEX;UXZ1N-T?4K*;>K^|tax{9fU@%oV-N|T+n=xIjj#gA`!6|$G; z_exO%_bXj38ubVBKj^R*l8uV93ciG-67@4sD;%kO81XOj`V_k8(lF@PfMvuIcy1-7 zrtV9sgwaYi8B->7V_f5$P3*JHELkA9aB@+NL+Ab_YF^^psYrLPh%XscsD600o40;Z z`P3hm0cc$ti$2}v=Ziigo&I+hEJyF}PJJr#1wJHh<=tK6x!+!F+3Zez$;``>Of-%f zS7-SWwpf}Mlv-rL-sLrh81lO@ChV&=_#EBFHF)4a|JB;hOs`sZ0#T#JmQeoc-pQIc zvV@^6ajRHqNo25Pl>6 z=Wi6xW^BIC8vkvpz9*fxw)TCf>ZLr(Kj=2+vvbtn=$6l9j_9vWPV?R1%V&}d`x0O$a>l@1-`=F(Rp3iQA>7CZ)k-e`N27idXFLF z@8ga`X@m`bxT_+x_(xbXtQjuHwTC1$?5W55d&9W7LhWhDGhOuzT`}td=s6@5yN-Cx z3aEc=&`GTk@YRQcnRTCxOg)^S(anRi>m3wID z4Xg3%_QM*|0An6M6Bi^(Q={^=Wj0iNoCM~Jjws{R$wT?);QD?%8uvr5D<&H<^&R zsU}yZyp%FV);5B7oa&If%6NUIry?h@&JM<6EGmm&Y}_wRT9u?xjDOZ2VdpY^;wyN$ z=X^dTsnc0{hj2JYv|r;b9v(|VLz{yyGJ<0$o=Twu3)TGH>k$$6)^L3cpwrzzplfNLdA8s8r4>^K;$k zh!i|5l_iNz%n5!$*S_m(j90H-jaisIsu23&2h#|6isa$DiHXB@8+sT!UFaHv@*K@J zkBsAm5Ddx*F&N%NJ~MP@sWhw6!Q{UTcxr`{Z7DeCs;*>AwM&b{-z6i4C@NK>5O=o- zqK=zy?h;ebvyEJlD}`h9cUUlgL5$YTi{@>vXoe|cVYB}gEPwLm_pAEDtq_0w$CXz% z^2dN*7yzz9)^`61p7&wpYK6NMR7Q+dN>rx}7cmePZJ)BOJtuW*&c~(5f6_v!g(pcR z$Q7vYsf+&E7-?*|6c@uwWC=Z7OLx+opmrEz6**Jemk>?AT%X`XP6lP+C~}cnpl8e# zOKsw^JC(6M;hxw`DTg<~et6(adgIh|$$f3YldEHkwfDw8ILG*2DpS@#vpn#SJ+%~+ z(G18Ijix%nfWq9NoMivnloS-}$lCGx5f~GxMvHuQKbK)Fe-e1K(K9JCn1Ho+=2E7J zG8X*^TIvdis-1oD6Zpg%ZcdIud30u`FNJyLQX@?_!n*t8DjvR5P$m4r3O5P=t@(jEn*gcyHQ4U@y;s|CoSa)s{MQ5N*cI zYUz1TpJL+LqK8D-qW;}ZB>K#KiW&JE!6luf?ETA(s*w>L>TvjW2!!H`n&!Md*|{9JEJ)F#BJ#&80?~0% z`-$EDPome|>Fo6h>T8*~S!!Yn;i59K=SThNqi;OdJpRrrkWRCd10j6IBM~HcdywM-j6jk)^YBJ3k(F~KE6_R*mvus15 zj{%#LS9qvuoq2eX;>mL;vv3&8V>nGE57vs=(7Z*f_YX8C9Xz%U?AcILb6PW$JH+$v zc@3RQFnAcRCY-QnOO;^82nhRG^eXJ{l{gtyIcY&+fB&8?&smd7GWGRQRUly z2J|PmTbrY~oXag8>Aon7*EVQgo$3d@n38<7Cptpk3eaa6YXs&^U?NHRL}6en42eWz zl3Nu_vPr%@IzZpf&rlnh*6tXx4$>Y~gPxyq6T4Tb6-YVvN3@#FYF0N)_ZKelS{3A~ zv(ZUdA^=D7<-p)y$Vs`9g9`a`XlVJRpuhB=Yg{wx5*(aW`j;Sf=I6=L+>cdz1K}m` z$aE*LXtR~4Czg)F5mr_E0VH5c_K%Yfw@+#^R4jyd30MmpGjnq{9L+yUs598XjPRmzwPrJ8AVihcjt9)?;D&SOs 
zp)ZhR6LwM*1~*WWflZBvUChO&m#^=rA0RV2?ZVu|h0xSSM>5|3^FWK5M!N{pbGwy^8(~yZ`Y@{k9`I z_6t75#D-zLz&%W5X}Oa=&t>WU)CYt+VYaWT4s5B9vnXX?TM=MOXE*SHrx);30jM7% zMxP)7f!%+|@2uY55bf9$>n7AQ8o^w{bF%7#w{l{J7uJn!_rnD*Di-CG&y)t*b!Jio$oGEZ%FRH?X-2 zJR}Gn#$?8hSm-KfW!rthCdC9m|^tw zz4Vr2A)?m}Q?ldL^{?f%x_=X${QI-)$wmfr08tm@`ac7sSMpE(i&tg;op=8h6Wzv+ zNGSh^D_v(qUL%eV_!g_x<(~V>y}YHjE%ekFypFeMg=)QKMZ$?~_a;QvsZL-DesqB= zE)7ZLKOD3q=HRgob_H?unXVF9z|*>_UNN|Ve|*7i`o+C!qEftcnXs;)$8Ie5W0
8DL^&9@{Ksdb5-A+aGt>5l{~HeZ{oa#5`w8Ii zLi`qBYucy%x>zvK;V|!5J6l3{y;wuBv^!=_&_4jpOXLO9%)zPk6TQ*1OVT&vmbQ>n zmrCixrdeWYqDIXm-EldPY3;yIG5#0iyIY%^z`eU(g@8lIVUr;=t*Y3v++$<4M+ zMmd&8#QZSv?IJp-7Azp6eeSI619ozT}|y1Z#61?!k}ZeDO`hS*TzX11+T zK}k_ZqK#X*b4NLdj}boYS1fMGvCFr1&sl(p-S_TlDG4xg+PXn~>pA0L`(@jB?n@_E4srCT_U*8AGb z!p9#VWP@1#FNjn;(iY~T_%#9vZiG|<)hvQVSZfJYk&-p`v}4t9$w!4t`2#`2{0D&< zzKY%RMx*c0ZDYfZ+{6__7A3F8SdQ#SV%7qLck<`kyID7cNXDzZU&BO+Ubjqji+x`B zkI$lmv5O(o|CXOp<;(wa^lFL4!cG+@u6FOP&)pw8Jk|QNP@RxywfkM~BdVCkBG?&~sf!+GL%&^$f?_Eu3pP`lryk*Y~p3#8y_P zjzt5KL>2YV79C|0VNpSO-$@i3(C4m23H^AoGd>%goaNhI>8{az0Y z?#_RE(C1tM{CK>s4s0v$!ju2Ja^=3@B#uN8uL%V_3%Ydi(`U7ZLZ_#08a!zD2}}D4 zmJNz>=%9S)Mvv;SN+mxdF#$WJ=m8$LsIF?-@)C8XDlMa<1Z0zT^3(a#wU==&p!Ed-$x~W@)GE0@3d8 z_Hof}A+w)LC_T2wg5Rpi5CoGUvisJT-ug3q(GF;g3gNu|c$eRM{5cC1dN@{ZlcIxI zKeLj1FmxD2wTYoI-}tHoYGxjH5RG2p1RvsDFL08}M(ysZMrnI?lNBlxZ{x044a4Qv z2tSPR0c7ES?LDx5N?a`LyAQVmM-TQ=k{$$m)!m&3-_aq~`e-b|U(JWiwvqtzh-AQB z3Obtb&0eH4cX}me{_95>7Z>-K&$+WTMDvR)WCpr8s~?r1r<6HnplNdF3v&}+U`9t2 zMynU%R;<=huM=7To1J~-Kav2q=;7iEW}l7?gJmR84RAvkF*fWn`r>CeF+dHdZCNG_ zb%PF#TA2-wU)i(P?>jL^^Dp)>86{b-Zwrrh*i#lOy)`=7HOcf)R0j=~LLzztIL=+regM!(C9Lg?UOb0xC@X@!>J>m<=2Nas0EAr7bdH}b|2^>(B*2 zilDa3@uFt--xK~e=Ugjid>7WP%+CmFc>ah_pyXw^%2woSe zaV#^*hA86B>Xfq+Yxk?Iu~s+vf9qDZm;0{4gLf%GS93`bbNiO#N|v(5HQhEdyKJ&E z3Q)`!4RW&QBo{E$e3(a1%!M_YjsgHT>e@DN7{;~*MAq+j$jZtld6W+sde~y{09y#S zMLKwA&nC^2R|G-J#+;E6_LOx7S(O26YO<4CIEL51+HiZ!u79M%Bl3bb(76Rv8Cx=N zbXL!?YqO@y*1bwDtuN&EQly28H;|ci#_IYtiCQT{1_|n6JkB29u&>(JD@m4PS|sC8 z-f=c=CTb%MwO*r@BvrQ0&po`L_kG4c?<7P8JdX4l)l8P8GE7M$biHq4qghT}vgJNX8AYC8V2N%~)>*k2Q%v5#o zATH)P%F*JF=J8`zt$C=Z5T^ASzx=z@>LhkyF@9GQ#-k4pL$?V-KM*F~RQep(jC$4O z+U6?%4XjQfOcv+RHTT~Q0v#vbG?#Ij-?Pl)EFUi5VA+wRNi6Pe1%e^h-QQDJ*!!PU zpOD-xiy!g{M8dp};oOBQxH35kn2WQsCf=Nsn-%7_A&-%w8M%j}%L-ges9n1z=wqzz zGRncSxFD`H$@;wpWor&Y3=$w#GYa8sx_RH$d)sU8?7Oq-pp zB-ks(iNuYvYz{H#N-~&_vmJ^Hcau{K)5@BJF^5Y}bq*$uCTVo7Bxi6)aKaa^u6#1S zJ_3K8I8dDgm?82znhuxTo$CJt*3%dZIsb!b_V*-z%06z?c5HZ?gmfqkv_XT&Gt&fs 
z@7=zoso04sp?e;vi^+T*;4-ELI3$vvqRn1=jV|Fc08^II-h+v2X-?h2N$RE8~ z%S_o+tEez1*z;t0-jxhN_48ANe)IT~g%6zU>rVJUX8V{KcFiN=BWvcEXL#j~_vkR% zhTYfa^trW$gbu~cNxAy7oweB(Xw|u-m z_Lq<(V`35bBW;gFW9^*kQX$EAwNaxV$Yc!%nLX|Ef2v z(j=^l4N}n=;veMmpzUP;&EY|CfFwW*wF*$j{li=1Cf|?pG5Y$S74(1RtOePDH3BH? z!1VlDA-s!?GxdqBhhG}6p#g)om?^(l7XQSKSK7dR+pi8iYh>Zw^cX=TKV?y}tw~A2 z*HS7)!sh`Ba-P`=Qe!^@j!F^T@Dm$3V}_S|(ZhZ>Bt;iuLtORKb}@TJ_xr62FmnA+1C-)cv=G=86tcHrgGq|-{PwWk`-{6aJK z`hCrORl&H+qXN+HLV{`_pB3HTnXLjz1(h~>x5~7+Qs}s3iK(+#OGOjxD1In^cjL2C z62}}eu27GEei_hwW39mYndDbV_oCa|3dkw?0_|BOcd3s!{AJulHnb&cAW(>j}yUeQs;}4a8NYr)eXaB1ze;lBxMVs#sRTy9(<=bebp&F}Lid zCG(BjUrM%Ea(Ae_YhiK_2}zHdG)#=!*4%7bw3YxDJIpOcAxdlXebD2tY7(DMUoQDR zMoL2bV?zK!^-S@b%Zx;c;CaUaH*n88($I64DlK*#w}*$f1jon41j#ZH^{_L&05|hK z;1BHq?qE>_S+S%7?&)q2O>V#{o0-?feW|0KwWI0alWD0Pc+o|z=?zWr5lv=hrqIvt zmy>{3mSn*FKrQMG%m1VN@mDG3`3GTru%>z^dXl+|9Nj)0+eHBkdk?$a_72$0)Ed=u z98=D-PT<#yg53>Vop+K1SsIMZ9(L=@i&6#|-((Nw0Rx7imq8r^F%})1{4HCwrci`d zQ>!h-0q~9*-1``RUw42eS<`M*tMt#w@+U88m*;Qatt1(~L%=po%D{qLnmk%*zy8Q_ zPy0r{P}cxvAT*74#q{%OOPVP}F#=AYfxY6^MOCTPz?tqh`JKP}4~E#kJ;n!3#@R9d zj^%sm?{)~r^2b9$wEHzBD*S*(XoLiaq*_Qq#iC(4jBz0!6N~{fS%Kzxvgciijz|-j z%6*cp|6L(W6zkRLtLwuat}7Dn#vA*X%c^5hF8>n&S zZSAaBilB=#Qz6JY>9#%&_kCSF_It7!sI9+s}E%Iy_rARD3cWs8KL)(mqvD{-@|#Jl4KGdkfyIjssdz;AFmi( zo6sFOEsYmqFh>jdh@VZl%0;`0#M^WIsFu_#BwWHzP3KLWlzAp@C9M7YwY4AB(KEWS z0Q40t>`ns=Q+8}|>PUK`wHcplDwaXW;MV;QTPc4(C_G=fmfz>^9oR;6`Ln-Ksy7d@ zQq{voOS_D!5oSY8>da_(qtFRw}Fi82zfH3(y`gyWsF zzq>8A@5_C)`T~@aWwWso&wJ(jDIiv5MMnNEW}0y%IrGB%&U8t|c>SMkq<7a)mX1pB zX$7!+o_GBtZ+VU7PAcg5UEcDnU7l)_nS{b=$^$$I$!AXZr2K!^+<}E9VB-Wh?)2B9 z_TPoj6(`WOKE${wV~!;1Z)xOgo$0&o!l0HR$jr%BE?Q8FSY@a%{Pued?kPj4H*^>8 zI@|gIBq-0TT|cL`;yrtKeKGDkOkh)$*{dQE zail^S7z#_k&nl+wlhm~h6CGRYoGv3z%7=1{MexCJeG=8irg0p{t?9zW1w$*?6KDDN z$n$>SwJ!~Ah~rq*u9f3RtmMAV4YzLi@)o>8+9$bZJee(Y4DlLe=$J}KBKdh!o1OQF zbsfaIM|DPbM)Em!y27}04wHYC>Wq!{l!JDf^r>1rH_iZJ6301?W{}W(tvA*9Ii&z= z)?kdcL-;d1EDB-!)$8&xQVE6jRI1j$o{ky-!2mw~>@#ZQv#Vs$hCvOh((xaxtA 
zV<$~<%`6;kIo<_9`)#9obTsQ^<15m2S)QiWZYThdDwJY5Kfuq zUswPB9C>KG*vwAR;!4mtUrqjdzu2rikaEI+t9Pid5;|zk!?{{9=Fq{tE6% zBP6RBCvBB0Fk}@_kpO84F|7@GR35G=082xGE?SE?WY)cA`lMpt?w!uyxb>xB*igQD6LppLMF2B zVAkETILBmeRZW~;?fI#V9G!Bz9_TwlS(aK2YO2%#jt~_q!UT7Fhqht)`Y9FP58C@p zUD8)gQ}8`5I?`SxfTSxcG}%qp3q%xsvf{(81gKEGdB4{9xz*392aW=R(yRnS7D=~| z5W*KAC`)-6vA?U>iQnDv7hEj!E|>0sr=Oul%8}hJ_Er%Ef<`DV?tzf6{^QZMqFpB4 zbcqi6Aq%*Nk*V(3tyxm)j6`HatKVNd$6kNodeO>xGV{Zq16b(M_9uX#R3`dJIpxoo z5TpMeWp5c5*S2l@;)NFO9uk7P1=o-S4HCTY;2zvPI7EQp5?rc);DuXo2!TR^YjB64 zce2*n@7;6uKKH)&jUV!%m}8F7NAIou8{@$irlBhJh15pEtG8`(ttZh>UDkz#VbJ@1 zV)5I_X9>&O{PZ1yRuUmQt+yhTw^QcgfjuSorWV9Kl3RK$-=t`1cBCML>KnTd)9n}m zD89CKxZTMc!@xNVzs*ke!|{YbI%r@Q{Q;7@@$I(GsvP|l!HC3#q4?>T+=~K@n{Q!1 zv+Rry7L5SIQ7G5{MVa&8cZo-KlF*ft{AZHEj!>(hg(nzzBna5pZIF6JG3$!OePC#XBBhC#XHZ@I90&&`@sOtvC&c-{7MC*!U zCFJF3RLE!w%6AfKv1`ZMSpTHPjFesRFB*5O_f{8*e#g+Rw8J{WNEgje8lDrq{w=Wd zW*u(a({-Sb;4tA!h+7zK!dIKAe)}Cjia*UO?tLWw>duh$!K&;k5Uk1=XI6;!RAxaw zo81>{tBFs^+0y3Q&AQBIVE2#{$&wXck>l8v5s+tEW5)!|W7=?`X>TH3`p|e7+Vmd@ z`rkHE!_JO2LF0I#ml28NQ7|=7OA?Zzu-~oS)1$`w4a%ZUXw;|}%+5sL(Z{kyBqGO# zT+WztS8#!l0f{N1K0~deCg$68yp}S!G{eaG))&1uE zE|Pmc$uO`^0!VZWUC{ef)aB!U-u(ueyMUqKo!)5Q-!1}=r*?DyUPOG5vi|$rsDK8# zKPV!;p6k7pJXnP9IPr=}jiN#cr=M)WzQJSnIt3QV2eHmQntZsptLgFmkGZ(^OqU#B@;OqI^u>J}!AUj*;RsQ%I zYw+ji`5h*8tjRr(USqVQdD|cfr|})SH$H0$E{H{|71-g!VE5Ftj<+UPhnfp=XTUh#u0;H{jJ9-fXc=E48kbU6{{lGL_{~ub5UD z1LKxLypdZxVF+s5wh8Dv^BC!-aeb9vg&;^ve{|1Cy<}SloZ*^y4f3p}0OXcxX3`S%2N+tqlgBYITNi@$HmsnmT%?wy@lMyL=y_{nrZEQ z94D3%$m-3*Ktwopj`E?7-C?UTu3IMmqtHRw-eg(`ivX@{B1kr zSoU$)=`_pR6H0l7^xyW_3YG=SV{c=m@D7 zI0+;X^liyK+7L8>D*DkW0E#mocWC-54NF@J4Q|6SRn>B+xTDx z;;p!Ftn;g+Sd7D8{Oo_=qvF??D9By?Mte#-Y`XyTlSJ3?ZqW7sSQ1?p(U8sNnCj^5fn$F@$!u|K4lt6nTMA09)gc(qe z_P8)sQMdnQ?c5TFU|zKL;5PGnxO_3SygWF}yi{_#IyxMab_cZ}a|2cOL13 z2{ox<*qQPDdJAUEH5>C)URVRdQ1yE0CCb3Q`+Lc#3=)LXv_u5F0bj5lueJflyEK^` z>b8J`C9P+$ph>BZhrBW7NG6Y1O!H>B+W|E_Fw#LJ?B>} zE#EXZhKu3Ny>)`K+fxWu@^@***A$DPu9nKJF$rf@jrBW%oRgAnjOOEJ%tq8H(4UbUwh4i 
zFGA&dFVh(N?WG!@w-8{w(uyVC2F7dAXwVtu? zJp=vt!?idAHS(fKVPPnwOAF8r36nh;v+EVvf(g_71lgIFkvyqdfo+iPv8?Juwh&^p zLeTuN18!RsT?WY8x;F1(`CE|%O$?A5&^+<%Rs_E!+a<4~QtwclU+o>@T>BN$9{w=# z>6A2WTXYU8*Fh?a_)W2;J7p)y-4Cb#?#uKC1mhx}jhCboO5WAKM)ARVv>0&Y)^KQk zk*oJbAh~j@s*le6@~SoPTIa5Vm1|$Hg+8w}k^wq=IsQ^ufwwpsb>;<9Ks}zCgrAfn z{E8^U;~49*$nzc*a$cRXhvfc%^IUDa+i7#2)Yfs)>Rwxm?_;bD{E(xwt!~p5f%oDu zQ7?N*=F2;=rvM(cw)M$9f)V)Q>`@}%(~57qVkarn7BHxg!Eg$6!Q2HZ2^;+>dm&ID zEX#XC*!gTSWhKqnD8e`i+KBsrOgcQtzKy-r3#yMQ3>yUw~@lJ-9`=$)!HB(?V!|ik>!PR!;+3<$u#C^?iOz*?v?p+$27-Z)FaoBa^zGlhpL?QvYCvzoGaT?=-P6tO;H0 z2FT7e6}V*})P9g^(wps_V!SWyJPvYm^uD`QezfHXKgb7fPT(?jjBmp8P7wMMr2(Wg zAAYywUmW69LFa$^8lxJJ@PR;5BCq|hJB+TXqKZ1f%~>_R_qP|TdBCkWQu5Rke*77# zkM6G0_FKiNxBqE&EYNZx*B;k-MeFxh)8)E&Ij>0VZkhfL8L&-MR>?m|kzZ|p{|7q6 zjRM&BA8J0?ah8=5k`3|=@3C446zHgrY#Vh~KN;x2_AT+5x>77V%{^+n|BZ0z6$2Di z+{ZoHO`r>-3pBrhE$m;DyO>xL?>_f5X}2SSYH=M^5(iBXVJ)3z3r;TWYEyGgej7<0 zRTU4P_>ATt?y>y1IrWfTlF4qz&w@Uwp@W(uNL!VK0>0!WL)T2;{RKWFS?(5)H*xo; z!oYhT0K7zLs)^CwUqSC9?*B*`x1j#?pWlHb01?c@mq3PGY$SaE#phQ&(=|VHfzJhc zQ=LG?1q73|;pj~^+qDoN#&BGq4|@=8$v?w?LM}VW19#zDY=~fx7hmx6vVPR^VeX;d zu=>TW6|)3WsAEl2O3(oxzTdlT8>^MCAJuvgVQP}=0?QNLuLfKFZeCvAY_^4s`kkL# zmj52rIXJoOC@dl05OIxnNyZ3jk-6+;v*D1rgt0vx^+DKV7mjen$8- z`%*CD^fPp24Vo49QH|iPRQI{$9ooFv-Q)4|$GrD~y!YYUel~{z*<{yOLz}HZqXBnq zxPkq!)Xnv>TS}MRHMNhR>&stv*ZfUVXJ1CQ@4lJxI#rs-)MekE2CiNvWJT62_@5xb zlG-i+%HU1{y=i+hKYn-%pf8DX*2VL74me--w*Ony#T;?Mqr3jspVfX$;n@G>&+{iy%GTfhboHN+UK-x0xSjz7?%7pPR7t}qe6;Eg#Zo!W zBH|Dhar}_Xh#t5xprD5{Yl=;xr--N)O<<)c1#g_0vSmMS)9W!Si!X>GOYWuCwbh1+ zzwSErQW;9AQii6JatID=Y2j>5MeEuokB?X_Ys1xLNxg_ZzTdEVO?{C2W4|W-qkpU# zj+*;xr{O$(yL{csN0};2GCWrI^fc`YWg}h$ro+MNiS3oC)t-31p6^|j zRs|AtQWp?j_oQR&@y0ZRl#7PNI=`v0d4`Plf&N;kpDc znIl0efD$Yq(o5XY{)u~ojqyI~$)dntVsgdu@=18%dV+*l%D)Uze=9RZJAsP1b#JfF zZ0K18kXR}*&lRhtVq)e)KlJCI%}25T+R-I zx#S7}D{xja3}~9p`3{XxecZp>5d(Z!01WVqi+BSp*afk5vsgRXPk1b^NBMrZW_>A? 
zXotbk4rI~1S2u@YahT7IN1`1vOPm%SR$E$>uYsZVyaUL(y7L=ss=@RIp7a>7OI*JNrBU0 z5+dkUvflHUmroy|{83n=iYZRG7|Ytto!ZaUw`VK~@80x0va)JdQlAtd4~?Ed z-kKkO*Xm_}+FKdX(pBb3xgRa`X?gHj>ppK)9G1^nsfp0R$I{rdSLTw1nZN*?c9f3s z4_xD#i``2i6}3vwvUPD5c}B=#7LCf#0q_syAU`R9i1S@D{=M|SzA7NtYQB=0l9_?a2jq0!Nt8lD(OD{#dd z)TjnxB^*ZqJMgC%678k1#ZA%^@1f!0oJ1wwAtO79G#Su))P|j^F81BmxV|*@P#wba z_Vv7Cj4EZ9j#bTN58v>4XAv6p3{{dY#&-kWr|E*H2HG5i(+)Z?0Z7lWJnG=8dsStK zV}6)|X$V~DjOeNsCB#f>VM~PO2wmin+b*v=ws}BEW9Rl)up1VVV$anyikz|Ur&;c; zC7tvLIk*S2k8|M_>eh}jcXZu~dugO>enS6@WN8H#b750);ou7XD}@YDOH+ypk|&b) z@K%IiqD}@Bc}q|tsD&)2GHDN!5J`xk(oSns@JaSPaB2)D&_%de$lKdRB~T2_bstgE zVR+A6x!hbukb6o(S+AJ9JQvbEzMAfo3klk1P8LQ~80@szC{opp*lKo7={caRnC?vZ zIL#(SRMgD=gt@;-|K~>)XA9q$@C>YKOaA`bdHn+wu^<2Z!gkB&8T=&tQ~)24cP8sc zzM~CbW*)cE|L@Ep+Kva8i}$&UdTn{vvuNHvq3KA1x zx6DF;KH>Z;IM;KWi(Z?Q`6O zkXqzSxU=1A%+i9P4>GanGcjU(Tgk-Eyf9$KJ!& zAsn@yX<4a@JGKkAC1`~mi!9_K-wN||wtLD#UGHCw`O5EvUvf>A{R_|WuM&v^7{X1P zF9s(r3c~u#L+5!>>IdVk1u(Twn37`iv_|sB%gxNH!;g-bx8sP|CLew17eNU?m+T3D zp)O*v_WKHpqf#1z(PBo~@iOESE+^&jY%CiDq_aD_XAVTK^-NDw^zn_spV89Nc5J=p zsn`_Ccb?s|ub;r`Sq@rUmmk_x(4;w0<_&esk9Dkx4fi3$sYL@m ze$^yABawVGH!Wyb6?-`$uswcoeAUvq*JQRR|&Pcs6OF3Cgl*$ zmUFmU!2wl*<7~76SAYfS?gP>3n&MJ}ns&$l=+#?>G_xN#T)i0B{2Zu7r@w~}kWTa; z{=0N&W7o3L0KCxNc3QMd1<#;<%6B4O;P3b4uh*im0x<+Tvvc_8=l|iYs2-tb)BEcU zU@f`*__xkE-sGWd2ZVfR{;Kl<iP|)JyO~TrtrfE-+%0*}~ul-qi@{5azt>1o3Wh>;>>35NIg1%{g6l{D%c~ixT zQHAUCQOmtdky9Nl6cWx3(ewbu7!6?W;Q@%ER|Lq(EzAm(2x#P=YXi5UD&uD45bsy* zDM&En51OC3(vGloZky!Rk-rP(?z1)6=|+~VWPAW^>IjD#xrwIM$NY{5?(j4-5x!9W z5OW&k=|aV4y0dt#N?pC4in3fB5MuZgyxGYy z}bc{UPwU;M#c}9iRUf04*9^CSV?8hu}n_N_GoKiK`B5xF7O0PfrO6 z0bpb_A1ne8oX)y4DTw|xlzdX21gG{9z=ahW+=}+*6hijZiiC--2_kXJ81uRp#L7%s zH}c>CDv#DH1_w0vvjK>e1$O-5w<<&7TESp$9W3Ilf@o78sgO=IKP;Y@3zZT#(N;B%6Kx=bGW%bC$|ywl*zhv$jX`SfGC7`7&wl`tddp@vq7t!@2f^#aoX3=5s6v z9nJJ~f_vbKE+$|4iSWGh@RE-x5wKZ|$^!k~hwH!egRC0pypM~rAYy+af4m8J=&JwA zXy_g`kT!Zfq?yoh-~y&O{9t6yzJ*6%EwEgP<&Jf%@ioXa+Eh24Nv!x#P3nCqZ!0y7@<_`avAn>iI;%C 
z8Im8FfePM|fBAu1dZyQ4?uOa@c4?^;;y=I(?szF8t0{`P@d|>{^l3)Pg#axa>l8c2 z+u+5wr*6vf_>SX}bXjZLrk-!$^<$~QB9Y&?C%*D+pUj2-2 zS;&XjYXvM3r0r#GbR0Dnf505c=`hbhVuMqf#lCu;U+WiwejCDS>vq3ie190FU;v{O zr4UF7Dy#SXtG$XfK{qd@0I7#E5TyD~O)mRTli%+3b*_4}E4_Jl9R^xT$%VpRtBB$r z6a8Ap#7JZF`CN(Rr=y66d19B3J-xaVZG_R%NglA0g}mFiZbn5j!X7UZym9e}N{bMu z1lP^2bI*(lCywZ9RhvUacs$)cPwKTCR9mJdae0rW94B9cPb?|eehv~B zKg=wRI_dCWDsI96@0rk^5``5EL0YyE8h|)k7Q%k>q=8z4w#*^OdJOI%v z^Aof#K5_Q$@!H9UHTuuK+*Dx>#%E^eEn2sZH8T$#tmQ%DJHG0f@~Y>yfsYT2OIkj| z--4a`4<6t`+y8Q3D;5v=Elj9NpgTUrvtZu+QR9krqfx-Qe&V`c`MSqW)`0j}f4NdJ zbv7F7JS_QQuU5*fkT?mKM{KW}T20`Ygvp)}<`)zW;sn5iHsaV4PXg)#W(9pl=_hMC8+1 z(|e5UW=^b9KH>+^x;{^woXLdT_2G_YLqRQIl=jY-6?kHnC{KGNe~?gCp*@*1y_O{` zP9sm-kl}gph5);X@Q0zL{%pWxX5Aer`Zb2Y#-Y(1EL>T(C# zhn&$G-IVw`pQzEb?<9B=8>=bGN^-vh!ZI$JPA?<>q+vdD>-`{sxC>d>m(5=EJU!wW zrLWvR^SL?g1R%8UAIPgB4^b^rcYXI1B1tbOI1A1dkMBOu8uZ(3947y_Z3PY3R(yvA zbvl-FI1J~xfUb9BE8T^yN%H{4+8NK9pfuE4il+mDvxVnHnVGWrWPQ_7sM20o=M5P}OuD&IetS-4f)CZjQF}yRl1ISGR2h*pB}?HHo3G)( zNEF4Cm^PJ9Q7kJgfCE$)b%*7t*~{K2Ek19!Ess1*PClC;dU>BGBBa0uIpJ09U#swM zB|hrooHs}!(de8XGdMTRe*ya?34F^T???nORwMg%F;?R&=V$^kCE>${^R+Mp8a(us z9i-A~KkW#=(0y#@hw*i$VZTxk;>sxVXvE+sJJ4f;FlkijL!j1vCT3+CLiSY|h0=wg z7M^lqG8}*i3=ga=-QuT2FYV3McO6`;3b*~iduZFkfmT5;VC|{F-u-ZYSPSh^f*fmP zjau;aPH9}{10%P}6u?sRB7i)ye(i$kArfop@jv|<^@n~9oi`BPH{A{f&0rKwVALCc z8hl7lWeg;Ib+WPXOT8;{bcXPyxsi=%_ccyKvu?}MrJIGN-nv&Km{xlYUZl284ua5|fARr=cbF*%5Ndgs-}vWh@XxS!}2 z9xKM0JCj)_z;%lNa-Ozfi1$uBK~OADo%N^0Tk9Q@rwt%J{9Wd9Ifz7F@K9sBBzc|q zop`1AZElCB`*z^nP($Zb+r#ekG^BaM9VSHxzGgbP#Ug7X(1$=G3T+y0jt7r`mL(0; zZ=KyUsA=ti(Xu3!a7_~6G4t8Wb_X+LMjUd4-QpmbEqu;gE2YnUap3c8ggkW3)8*fe z43vM`IIi|6Uog0t_z&HumzD+~)Fh~DX_Jj&L6C?RQQW#WXWPyI$603vec;Z$na1n9 zpHr^PTjlIN$yM1Hg_#- zJlgJFR`&Wng2`Ab=P?HRarAsDgtN6O{o}ZH?hX@(aF=$|D<5oqZ?YsaRQ&>wXJOwF zHei;z&Sx`>H`?k_7@SZNcYnXa&03-N4Q z4ZXZE2nbsI(0BRk#8W7ND}lbiLvQ_N2_)67 zD0xf>znZ+a12joU7ZUf!P9kSRZihQ!`<-u7KuH>;RT}6?hRC_fWF7CQ`rx~kUj0gt 
z5ZhYYT_fATck26#^(_vy_(9+Smsfiq|ECA^Aa>e}*5`lf8EXQF07Z@7YD2Mn@Y=A$vR;|q4XCx^X}v_*ZQ)DDgLnAI}`=(F&`@RIE?43 z$UkIjo`*Z>(5uI>do&*wl8@13D~c%2G?P>@Bp)D|Uprf3o)UdB3KMI_jC|8q_c{TF zFXTy+U&--Y!ZP;H9Bz_aE8DjvG@bkH10{9+#WtG&dZu1R-(~dx=g2kCajs5TeqH&+~p{E=v0%OYv$1l_Zb>CGGxPu6`g=}W|yB_u-MvlF) z+5wV(iJfHtGvINslzo%dsy^!l%}xle4#AJW>|6lm86xYSG!J`)TU{QcV>5%0h(P~) zkjwZeR}VjvhlRlei<;-_j0{B&KPtc*Li8CUFPx}zh9jipF-jH7gNq-dWq2dTilHgA zmCsnw4UG16e@%nePc{DH&fk3<|&7b6oY z5`O#0`At3Au=D=5by?ODB*ugXQ02H*lyukBOyugAxo0Y0%Pc(J*b9EnNos zKnzg5o-)eM@Q$F6bVs3Hp|vvk-~dHeo&~Vm;f7yo2P{RdP0= zV2hTi^9#jrfV*KoSBLkY+i5*5;9dq|15XHG*F-n#bMZYXYL`@%O?S-pG;R!&snV8( zG20xCeJ2Ga=p@W#~w)qz(tD`dWs&1Nfo)o0Vf-xLxdBzr;36O2y>Or?N z?E&oz_8|YNZdS2*31@Q58PL6MisMWBLJ<-r^y!l%LmFLQPM1*~viq5>9_>gal9Bnd zPzdT4xSh-LhKNT<C2cVocKWVjfE_SmOeQbJ^BXyT8)=yEr49J_t3bs|U2?1Zfk{LYzD zz%F&jH;kj!j<7+v4MBb^HlMwkL(dBIQNwV!;fWv>F`!(F2%5hSuD@HKb3V3N>~`7* za#`fD8>d8$_pnHMp90n_85>*h`fu-LbPpR_y72yIYrREDHmVGIb_{RWm_Cd$eo~E+ zWW!K673$1=6K~&Tu*zlcua3Pl40rgPwXC8Ypy+&J-Dtmwj(%zBPFAqtHZDq zWh%E)56eD8cQ!lb#O=%)J=%_ey;0%vgS&MS<~M7K4(k`n;Y?H^rcyVhAfd<*2PRef z8g!lwReP;Jjy_ObAC!3?ETuS<h>dibiDX28 z@up11@12o=V_5m&=Rm)MMzGcsPHohW7n?rQ9|u+-P*B@qLi2tT4#3h9z1u&yr|Y9} z8v?6e)IxnFB(Lb{`_RdaV;3b(2@E?J9{hJ}Y>m2C5mfx24bCVI9`l zAPJ$gJV8BuQzbqFAWn+XwthyZjxh7;u)bgC6i{*>feSK1vuf?u_V}i0&CF>P1g7(8 z%}s=6A#k#5SbENM`KjfOGa?LL3B;YvJYn&kU-&8V&H1}Cfvs*R6}B{v4rAmd8jhC&Db6SnD7LJv@h3Rsel<0L=i##1% zUCAIgbRgj|dc{PG&shE^tOfUND6VhCgKrs_Fd>W9j_z|^K=_?@22qek~*(>qMcIfB4P^ewWL>q zXSTXj@8gi2$ie1K16uP+RhStycJjQbRaoLL(s{a!m_~xj{N0PiU|7EVO7@jo!@t`R zAG3m~eh^@O&_?@)bdyG2x;}fp>%o_1l0g20W)!AAn9o$m z--E`?N64#UmYXI6ef^V(wwE2(mpolzNV}%^?aKRb?G_ZoA@(6@-x7Hr4^p!z5;Iim z5Ig1^%^PcfduclI#aN_)W0UD$NA7%PF_A!lcC1ch%Fw7nvZ#P40RX3Q<4fAmO*`;E z@Vx};*7rJXSCeAW^ADy@uM4SLepRs(j$h7m4qhW~&5@|NKND~RQnrnrYK}*Z0*_`$ z9)0=h-0AGMWGxh|%M~6r{TzB+frUt_6rM)&`uLfR$ZWC7Pj#0)PMs~@<7{Dcql%I0r``fSapSZmb%|W0&7{1mf^3c;#Oax*8+U{=;c=<%l zq8xIfA*RHq*y*U%jTEy)KLvxhy9{ZMf4G(zsq&J7feMIW&7@NCyTc 
z+1u~wQ)QlK*b9<|7CxDJA!!G)!yZc`?-6>mS3a&dn8g8?sFIf`5YvRzrvUTrD6=8= z2ZN10@4-`cvxS%-5fEK;69M=S35hvX?+MlsC4XwkTlJUL-y{62-{za?QDyqj-M%4= z=~UYLAVRP3&dysI0QPlvVQdMSkpqC!s8t63U+eA|V+Ft4ebAk{k>E)=^ht^IR@rAN zt;rtAqs>xz*yH4UeF(VPca6yB*VsNv1w{L;*Nz#^omiI>fUc4@hwBf3tD;=E2*zIl zNnH`rd0(-H{dn0O&HtNLlm z_D@ar55F1M1pbj{Ke226Tb}(Fy!j<9c7LGD!_tUns2BjyK6>arl*by9P?bnH>evpf zLji_B@vBfz*?ruw17liwmy^emdh}=P2sC!&MM!$y@aqL80uDiIaqQNjXDgv#>&Mhc zi+(N?g-&G=XGXEXJeF;r_W_fM{*8B&wB9%1pjQzbv(p))hQ1fl=1`qd2S0tS*26nj zo=Y3U@Wr~5GIJGqIs+?OABF`6CrV?k zD-zv&PK6-jCR89p?iGZuN5l);|I!aw3NSv{;^te;Cr&|2TmT}mU3;=Z4)GZg0F6QR z;oFkqC+||`WY3UeC6pCklHsj1txjwg*SXjjb0URJPX#+cU$-kii~At5sTF63V#KF& zL3JWJL<-*4YzgsnC*_s{V0imBk_QXXE3`w_AiniWjkAN?PdBUAV|HrN2fMo*b&m7V zgWPR2#+RMs|J;kNf27cb*VA5uZ!UD&qLrXcC<&4$kAHe$@{W(HpK;EWe^==v6e>qG zD#L3?fA8FlqP>W*Dr2hRsjTpxhFlemP*tHjprlE7_;YiWA7n@q-RU`~w_oHyk@$Md zjOyEaHk$U0P2nS#wry=FpJc&LQ`Z0c3fMC|7Jra{(*Bblw2k=#{0Git$L~TXN2G^fc)#Fqzs^R0L#ff*c@}>3w{Y8R(OP5DyiXd4`|6}Iy_1l<4B!ue@g(>fcBUQChUl0tnU~EsjJG!<(_^tO~v1uULsVIK31p|~)U+JWosE{!&3Vm= z;Uhs6@78aAffzR*MExuSM&FEyNl7KdQ471`c1Zytp!-Pf?rmsJ{&RCK12#p=Q({h$ zU7NBZv!`T+ND#Gq$&9qoTZdQ+EVDrG(WFY|mxsz2=$f_SRCz{%v^`$Ir2$G?V?p*nvwm2B zaGJz~=d@;YA;F6QT-1i<>r7L5DgNoJ-gxt@=r2KK$`$lS zGmlXNvTO!wMDfNGm>oJ0z4AVJ9Lbie7(#x`OIl%wJ8Veur#L!jMPBwKF9~c~$}(d2 zB;0Yn_|Rd)yGeb}tU6Vek<`-Ju42Wqf%>>YxBO{(P71rP_d?qu5RS)I3Lru3WTp9# zm*k9=+1(E<1KJP}A2C&lGp(Dw#XC-X^*XZM);m0>_WSG6gw+t7z@X;+794Pb5vUQ@ zL9h~r*Nd{7Xc_|RNCg88)!y%tX!!w0X*5C1vo7KUBna#V;QOL^nrP))+&rVpUhw93FLx&1sNoP=*xk+LfO z3AiErW8(P_!4OQfZv7{Z|4Z*#UTh7ZE+c4S=RmrXOzA2tl>757+jb- zF8zTLB0+MVkVY)?R{+m~_SEDf3lkdVlEcHz< z5I)867m3hObI05SFB`fAEl{Rof{arWU!++D)0Afp>6LT0&ml?jvm%{-Zkhh`CCz-F zv&z2VcEo`TBLk#NQdnR)$CwvGLX$wKg7tkOC`ZS1ni?kxVdkyt&O}2%=D^g+I~kgT zKu;J9E}qtTR%r&%!t7atRki($<2XVgu0_lvrcb&^C#uw?L2&zQuZlTrUzIWHylGIE zRkruxv3F!tskl?yWn;(1t*qt6tBR@o`iAN2V}A~AZU7^@1l%c}NZu^t^}kl-aQ4=4eKw%%pmea0b#36a%lEgzo;f_aow2c>;pWgf4+aTL5fk+ zG>?fn@oB0gFVluoc219GszPTmT8I%nIB^=Hi`O(Y{VWw9O@Z2_4WnCvbPO(j`6%7k 
zH_W}g%vRO9Q%$NRSZb60cATEl3K;EK-We!LX4FFBPt*r|RR3qJY243|C!-%9d?msE zl1&ioC60=r9gAkFHe_&#?1HCz?O%vx=#oImX}R8!a+9sFzQeKPz$X4_G_Wy_70z2? zTYQc3-Pi9u&?jmpLCbVR3o~DFpQ&J|@ESY49%udfdX!l`Hu5#`#E~mmW>gy3+iG?z z`p<|LDUO*aw>U)Ajmtq`iF5*q?=@ZtEV?N0n(`cT8 zr7LGKCl=BR$V-CloN|!5bbz)!`F4qYitrTAD^uOj1EYm2dgf3h3FmrjAdFWt)-*Rw z7Mis_02Dx?>h4a>iL#H%yCAg3+Y_|X+^7)1*i5e99tzN%M1YL*iPLUzaf1ib0J;9g zZGH1W7zU+M(S+Aw2j~b8sgfTA-d`U9dFFnkV}ciFRi1&AMOgtCsv z0GjMtm)_302z~+3Tl_!NvHxDhjBoov?1Ttp^k)xPh2hByENC7Ue zjnvk4D>cSU&Mx8?U1gSpFQ-iLRVZPeba7GWTIp^qg5*oDhM3-@D*D4wIqQ)E>td{{ zLL2{Z8NQ6R+{)#BInzu_vV+4S04O4wG0D2GhWPw`bA6HrVXq%;B==N55n=r(+=kop z&Jio#S`n^8-}>|GWw~S+3SEnjXIz&B@iE`wByuH|&x{O8YedChcuB=`lm4AG zfV>tHWsqi3?rG_Y;6ck<%OAQ;dGqu9PZTYuHqe``P;Hhp7#c`r8uoR!@1;Aa+s>mM{CTgP zI6>!-)d1`P4vt~5$%L3_3kq!ndq326qilQ zuQ6v6T-=?;R^=I2$=1LA>E_$xJ+(?b6$Eh^`b2FHrtqAA zLTL@vK>XRBP6~u5Q+B}+;+uK$FWCtkf`(c>c3C@7QCRjs@~qVTpV1@06GRnwswfsR zp8^afF3UXc1mmTa%NlM51`jVk;=6sef4omGLUV!@v%|@DMTxo*;Q@;&^8;xE+PF}F zOGn4(>^oj`u)Nx|THUv-Lu*g}%*5E-pY~Z!#X0uRS|9YSv_wx6{pZ(&*H3m}I`GKR zuvW9i9?`AZ)uhOS^^=p7KyMR_%Mbcee+_ZvSa+UL{?CyU8e%EpYX3zNgz{j5gqen- z1G_D+DMm{~Nm|O301gtxYW&IGI&y^7-(>EP3zt5R!U zgn}Bw1~@luIRCYsnVA_?O%2YL$F;7$ey>>|*2xL2vZ`vXMkPb0G80ui1IxM6p)kXO zFlT%-Ayz(7+#~iWVJZ~?He%EU0RRv4F2^ukwo)9K^BJR z7qNcf^GWIPP=YBo6p_uaQG-o|orN#ecH%{*&uHX|{3A`>!_Z|D$GP z05}L(E@vInLjw!1r&*hVA5l{v?VO8bU^bCjcPc1!0IVCZ_6|mmGM zAxv&%Ra5wkY@^oO`~7H6)S!C)A|~z1h2hW4OSnSI@0Gh!QagY7f$o0B9@3!LL_4Og z{(=ZL2T9kVsx`%&NO~QBudjc%VevVDNE6DDBMgbBIK_(9gs1~{JqH%s-(iPOg4%j-N$UZhp zb=KbpQ5(k@oIYXgYSJ7w?!AveXj7GQQg(zE{{W@@``ZhHuo2nV78FeIVb z0Vj>9uxoNBe|A;LKXwhjH+Fd6`ht^+Bas~+GglTg*c_!5Vs&M-mB|4UI;uorILbT9 zbE>I)s?*eP$dDEXt5cg2INAi)Q@6h+f-!)+qcID*+$wPA+2Di zn;_m}!{8a9|8SC6hnm(09Uk@Zb*CPx$$no)UbP9*&`47IUTZMO`Mnj{+ z=5K8yR%xq$?$^1dHVQ}olS9cmI#R_kv*ER5y`7*GIw+C z+g!4kShC0Iq&2(^-K5+F&vE~$t`YA1vc1EAC|OE$$B}FkkS0oTl{VR0CIhuX@~QaJ{4Jx@ zRXZC;28x4q)H)d!(eFp%wl3f^{K1(LP@NYl>Cp*r6XQhhot8q5lU3C4@1~oUK;-x> z3*c|WQU26^ajrhB(2LH5`MyQkjzx*je<=I8h0c5=yn2BA++pF>wU27O57F@=S6k*7 
zJSjI3DQ(`)YBx*NDXTy-J7*;;$hCy!xKV(pAG4D%8`e;Vb^g4^o$|>N`^M_Zti^QM z=ts)qXd!kW8?%ybkY@;a0I0qpAoxlEMY~}(?Wkm(x3ix?_WXc}XOH<|m5vFw_Iz3r z&R;Fg@CGywIT<%wgab!?9TT)2;-3v)%=K7W@D1qNJZ(AfhaTa**n@%xZrT6p-8BD` zqV=Crk^cRN8aluMHmJsv0nV#{0&K9-`czb9sd(oN`$#>JX+ z>~vKlVqVsVd>afe*3H6Jpx9MaR(2)bzxjoNs$ZHn+`En1F59}7w+SSf@`{N?mh`iJ zOXn1@bqDw>pJJYnxO!V?Qhxo&`gj#fDA*|4WekOlK5}k-D#5A@XY_yZ^_5XkcJJOa zNQ2TH3ew#v2#AzH!_eK$(4m0RASDfg0>U6&LwEPk4Ks9iy|>SE{%gJKoU_iS`95pk zdtcWtCB?$vv{W5iJ@zwqb$I$&&c-E2$t-&_SG}xR{yJVtI1IE;Z;7?0u$~2C4qHOfa-|$-QILZr9kyNEV{WVC4uo@hLqF{9)(fLi9E7ui31vN9uyN2}u0b`D+2* z5)2Gd58U}sw*|~M8XE521AUUsYRwImG`#AN-HCi|2jON6I@zz)L3;!yS0t;&r4weFcD> zk9ak+<*P(dF>`UTY^>q?seN_v`1SS5Ixg(w6ci@CZ;$aDQzDIEohKQ8m$2@9m$Hh? z@88?nUwIXXrLcRZ@Cu8^Wd=m@oP!9YBV9P&`_J|6uKbTEbH<+y~yh(ni9u@2qQc&j7Gdx7?kddnY<)&T=9 zx{PLl9ubi|K$#^AcpfVAQuw_S#Sk8VgMbhzX1Ig4;ANY!-p^1F@PV*-b=^6@z+P7i zg@FO%`E}<%N80VY_&Ed_O7OkTIShs5Kq*(h)i4@M9ISfnN{VcG-DR6zzt#1D%5C9DcJXOi?yP>$61GFG- zerAOwHXq|Vf$k${jz1>&@De{`GMKWv~)=Kx7aVkOij= zxP^_b`?c4M9yX0!E_AI{x2Vdx5N_(Z88JC8%3?jA;~c&~Psu)`=einQWK~4{^&<3_ zMII|Lf&``~Mf>Vy_G^JMMsiY^Whb_emq2Ob&S1FnCI!Ljv9B=i3UCn)+0q^)_v|Jucp2QVh8p(y;7;6iks0*@NbH4|vAH=ee)!XZI?S#dyh>f+6JBmbAyzKH z?NYI5`SkWD?lRF;rkXkNO94uUs-B;J>VCoZzq{ZpIuZa4LchMT;kv_Ksr|L#ryz66 z>GDn?m*Tt26=R?@q1ikc#6YE#TX`ZFWXt%)PasT*b&yBI=HvDE+xJ~kC?GoPBcbdb zoR*nzBb6tB;$h0@cE!VY)fHIB?wmifRss$kwYP!*4~5e7VuQr9Q(SBVZ={V@mk?re z-@jt7*1w|4&3Co9K@N<`n8ziN3eID2AzVoUS#AsX`>j&4XA?5@^~FbSu@0a8=UGt# zXpU}XJ%x}#HQ9yuW1!k6LE3D70O2Ni=tY9S+s;X8JwQ5B&|F0tH&uq%NFpPuW-Jp- zoQNX#v6JTn!wTU8DWrizu!&6=DBM2(vwoR(ny=mKvh`I(4=_7|0wzA(Oq|G)8Juk zt6|61iuH>iK)^yV)T#kPYSKy%N@3szc$*yU+Uh*7lpO7=nn!8hLz)+Fbjuze-MCK2 zAhd3+-Z7?Pv~G+g-YQsmihUtp=}}(r1pQ(Ke_?#%8uQljOQZONC_KY0HKWnj@?iN? 
z!bwwo$o?>|qOmo$0kYe?w4-cjsOhvn{4=|Gx#dx{tPR%k;IaQT&8KjV;U0zt6@T4r zCycd&U*VGeGtp&HR>LbUdfxtR_c?~_qO6dkne8s$NaD*6g(?oxY3lER-!vcmZQ$OH zykqfdxhMEEU=g0UU)50{3$mfns=#xMr3yACG+I7PuNdN$mcpQ(C&Ueli$IG%(;ZWr z5aSX=N?){0)RKWreFYjl`rL~Qx;8d&#WJ4{4qc+1xXO1-P6{Y@5x;*^HmTYUKf@Un ze8r#dRDbnOX|wd>^39KslvR!nLkkS3)JBEqw*itWo0cI{ihB9?Y(5kRS|4nlwG+wP z=ouN+6!L?_YX9ed^rZ*GEy8IB`_)a}`G2ufmX! zF3ZE859}KHWoJ_>T%pEgJ4rA@oBDnrh^7xEKr~T!wo6W3%Vl@S#lU6jg~844Eg{Zz zE4Y?HI@2rrX#gZNW_C$hk>$_>by$20f5CkfRVT0m!+zoDTPq0N&%v&VPo2#drgdHx z0%dEEZ38hO@aUfs&=07g#pl5cR?~COd$8a1?HW>-;_Mkx+vN~GIeQ#!I2^-Mg4y;u z$GF;|eU5i|rpVIw0mYx<;KV&RgDy?JOftsa4R>3Cs?YZ6u`AxYhUR#KtYI)^+ehMH zbCh6tgWoM9c)|<67w^Rt?A-i3=q^`Fj;g&Lo;mr=HdYB{(|ym#=v62D7KiVvkyE3o zmY)!vr%=`jwYF+cbt3&EvLx^HBWhb2d%fV9-e)wuYqawTEahXKia+$&^`AbTUH~A_ ztEIY-rX~aFOJSoEpqjZcq)~y2-bn@(8h`-}cwm&*+$h#UHYl1D^yWmI;~+L21g?r@ zb!pBN+NU{@oWA8kkWUo64e<^f@lGgDeK6aW2Dlq_$_C;UX@O3g4G~bk}1lyKWL_7)^xvJ zr}k9IBr34Km7YT`n~9`;7B`(YfT_yr&c7B=rVLKiXAGqeY>oAAX>dI2d;UYTML12N zTBlP-B3O@k921HgRG*i^o&c7nO?hj|N`6;yedV2|KX83s*TlLxz)Hr(O0rw^$cA0d zS6o1;mxQ5Ef~PW>4wzqZG^j)6@@SsTBw;VtksjuxG11h`!REr-B(C(drxWd${0I&f zBp6re^BCybWn+FgkMT|%`|JThQi-S|DPy~kUXi@1I%% zt4y7y{@&yT4drSM(m%ZzSg{}b6`m!EFQ2FOeVSVT&&M+bn)_4#0n!{;j7(f9lG8GB z!{uy1J|{+M|3p7F0zinR_5_1K!4)WWng@>ZB?!S+Aark%1w#j_S5;6)GyBjP^$EMu zP5a0Fahy#t-c;dg{~yxQGxMr15Q{JQvJ(HjlO-~vcJco?w3hmOW&d8;R3G$Hz*)ed zwXnVI^Go>7XVE|DCg1J1r0>{_OdE^S%+v9Qv3UQDipr3&WdJfxW*Kh4^tR{fh4;<; zZ=;dxCOItUk0`T_wH>y9+AXX5>Fo{LfWH>kXBO9oQ%n3iQ{pcnh4|r(m7n|Gqc3KM zEkhSA$x?T!>YPuOfc`VH&_SM=U00k@3#AfM5(5-dkJE>ys-~>rybSi7j~w|r91Ph5 z`5-6b-|uBJS97H4=XZJWxx(cxp7$`_u403RA3WH5-;(mE4wP(*yeJk$exJXIJJ%)9 z-LFgG{d?kfj_~^qVsP%Fe7y*^qCJLCjAO(|$rT223l7^jL%U{6#^OLw3OiW*%tveU zy4!M9iL*5+Q<0TYks0w?q@421M*uKd;89V+pAcRhgB8d{N3+5svp7N*k9fmsh9ak9t(?hIaS?%r1W zpV=o=^(oEQrKp5M%!DIWaU3M^#aeO2Q*nj69VE?Z3OH$&`YRBX#EJ95kftQcwdd$y zCU=?+Y8ZW-@K>1(>PX|o*JKxm^Hy@yaOci4tAToWKd9`+pUXNR0CcktOqNmkF=E(s zfVJf~A*IP2r~Ed#wu!{(_6Bx5zkEjtGrIKB3CoseMKCrJ<=xU#zjuZqy!pbEB*L6U 
zBILiZLq&UL!%f+uuonr;KBI{-J?Dtg`Xew#+S(|=AG~Q}%aV?Wnc|Ut--g1OZA;3l zcBu-`<3g0L#7J`Q*;1*#Xbg-3!ez_hqvvi?KuiG!6vdO`=z;;_8b`CH6%PGQv4`=^ zqpktPFG;%%6l;lhu`|z~p()xZw)dQ>!kbCLD7>+%S<2y46T9VAV|Rei`8!t)JMQ}u z6euRGcaCR)s^o|5k=XqkTwd8jwfN%<+<%Vs*0#BU*S}Zwe1Z$~-&3DD*D!c$-v908 zhk5m_#7W?{FpG(A>r||X;gg%kWKTD*D1UfbJ^CQ0JCHD*H^2J@5V$^1NrWw*-_ylW zgM>PCYaaB1mxd@2H0@ie0#KLyAW+JDtPW$O3=bnB0MVvml`A;Y#qWh zPfUNr$%~KZ=D&2MWST|m=6k|1sf-XR(Xmg|x8M*c{pwh{YR_vCDuClzWjJO_@?_tx<*e zuM8wXubEr$bCTW~H|w?=1E=-5tUD$%x`>cFjTDg_2P4xACR^>&I#?4ek0E zW~iE$t0gqlSJ7!l4=~W{=2@l2zDXvk)tP;yVY0OZS(Hx14{ zb|DqIN_@`%MbzemwT3j#XX>*6d>?L$Ed20&6mJ0Z#{yWOGW4bm4JCksU<0xMf~!l| zZ7I+yN%p2nz$6IGspACyA4Gayc+?zdzllaP6Vww*EJKz~w-NVx*7$mw0ctLzY^%2j)N1btNYSj0(*IbG9zJ zsB}O={qOuyeQz%HMiscycSIQ_voF&z0|>U?QF?a{5_<#7&Sqt-pah zK?<<(G#vAH2t{d>9==|BJ0A(uYT~I@XRp)3fER~_(dlR>6{T9@_CktuXuCYAmmTwb z8Xj{r#l$J4nv=!Fdf`D@Eim;h6~h*kGFpsAsZXd3Xg0KRMj#-W`_^?-3jBROs3%m1 z=TquP>^E3s91N49cS!v<@1Cd1qI}>TVG}`?r&2-1>!tMA;MmBL@M}r1sPiXQtypV36 z-k}&(J2{y}jCg(J?IaQYi50Ba#~(j*-#v@`ifyK(sqm{;G(_^WSu+RNQ;6{WX9jXo zep_Aa0M-i-1O>|A!nW6;!yB#Ay7FKd`Xw$1IIO<*7*Mf2i_sG5!d>dGVXNoh>IEkjMuElQ=RCUo#$^c|uQ9II%JDrD2fB#C=AI5C}<_{Eut%G!zV!) 
zqWj(!niX8?gC%=0T1$eB7m0ZNwUD9M2W2x~BDpkjF2$FxV?4=HeF}XV9&QcBPB%ro z3jgnc`tJ=5Ow$y6k6pYL{ojh<5odl&Zrpe5Y^5$yszZ6hp~gx5)-{TwB&|Sni6IBq zv9Zn5duiI~QhgX^Bd#sQLh+74SdCoyeAue9j(HY7xV{*mlkC$JIU!8oX3{cztz$g= zMmf|bNBLJK=HoWcuwQ@C`vv<(C16-+#<>xx^dg}O^n@MYNkrrBCPAC!2moMm$J;iahe}XrXM$#uPmS-v|A!dkgKr6&)fnAIjdMwCmc; zEw_yRDerDp5@q4X+{`tddfs~2?<*Ih-H&ShPP6) z`>@oKZ2-avc}MW7isKs=L&jIegg1=ehk()m?7@BU^a2YCSL}HA_sAfzOBCja;p@^JT_`pw{t0R-b&q zBDAbu8{0a2?zq@HJLTFk+#T~dG`j(=;d=m(l}kv|Sk<(Un*+fB7mN$%Y+mxUl1$j) zA&UD!;(KQjSG$md1gSRE>^>xW(^3M8-`eNcdsDyZxlr}QJkw~+&7MP4gF$>f5@)LO ziYWNUwxheP4Q?aJcK)NXm^NO)MOMW>cUwkCB#mO+topsx{zX4d-TBJI{{=@J`s$Yc zy}tjB+rYujWH>dV{udAFO&z(t>Sm~7DIBAvQY_Nz>Fq?bgu}=54c?PLBO?K-KBOmZ z#CK%Ti@mi(7DO8qy;{~|^BHDAqSM-z7}zN z>%7%BsmCZ0p&wpIMid1niu5MhRPK)hS-c^ohas!QwU_ z`HCsvccZi6W3#_d0bR#*OsJb5L~8$-bC_Xd-Quwr2)FhL4uF7%?3`hSwG+@r3a*`e z$Jz=RkmK`Z-L3%dYaf88+sUal8heX>3?(tAV)w0eXH~i9CJha*tXq`wq2tD$6fXQ~ zgYK?xv5^t{V5O?-4;*#o>0DUZui_;lPgKe8pzrpkHa=)?*UuVs6K$uHia{6s0-**N z(4REU{0A-9e}L2(U^26o0GEilde9ekswI>cX1Fb1SEbgY(aD4_{1ku1ir_=`JK+C zlW5NHXygl%)D`%#w&h;6B0bxC?TJGlj|DN=hAH?OhT15WE zM&fw0g)_O$%w4fRR92=MuC&3#N{}Fqz@XAWcSTPuMCB(Z=ZxzVHX+2}oPumJV|AFG z3;DQbo8xtAiHB=9>RyVTC*~bgAQF;W9<<~v5<*Onk<6`B!(|0`CE*@fpFu0za1hdC z9d)JEv2}NUHl<8*Y>gWLiH&)q8{R}c(u!K@g6}j#_szoK+%Cy))dabzf_6XM#F@_0 zu4hmq6&BBCToDBNiSte`P=H##ZJjh{UPXRPx$9jt8gKF&)4~kJ=ftlBDP=k1yUiIQ zuixWuy3+)2;M%{TIdL`Up#D7uMqL3Gbj_iGfqIlwcE65eX5x3;YeWR5M8bI{Vp%|B zxD)e+ya*!aw7XS1wAtOkePCn&I*GkUgX=vjK|bFfWDg*GY(CY+OG5;BtKDvNo#;KK z!&ibnr>GshEFCC5Zj%9sP`ZTM?W)Abg%MSfmA5_ASdFW(qtz2R4)py~_3Rh?y#;Q& zv8<`=ySCFtw;i-sh#CCHhujmX5lboVw}(!94qr;V>%V*LU$287zm_$eumuM=fd}+g zJZC$(cG4ZmIt<7%y`=Npjb0G*cA84`Qew%@EYEi>T`17 zJ^<4zVnDB*vCQhxEZIZ{r%#R~5qp8%-{wF6am8OGqRlD&y!kS$fs-Rzs9Yeg*f+(S zHgwtZgK?2O<0!Pl7>4CPWw*(5iMm5(xw?1fYhEZ>w zMn&b4M`-G`@9)U53jw_XF~-$$B$In@r)EU36oLd8<+SM`)7ifQkB!3?4$p$epKTPF z5d%+NlC2RFLX&jn?Jh4uQ0qhiaaU1qguSsDpgdufc}!NnMg8YAHX=Lo?)Cik&+M2* z&v}Q)h*)6Pl+mA=R>6!6F~rm)AI 
zVxh~ucSu(*d{;7sKFntiP>Z1kWm}mU8Lt?!e}Yw=H$p$ok5|NT)AQm;43RY7eg8>; zIPr_IoKfl#lke$_$7`aoy5#fQ0xR^g>$a&0`$-^CJFtiT8EdGN5*`<#Vw08bH>Zs# zO^afHU*=^-m=>qDc{4=5ps{+Pr63sJ<()A7Qiw?9xYgdEPnu`&^_? zLL^=C^)Anbs=N7I+BQ8A0RzP;1JQZJI#DgsvQB3~j#FwGNe6dXtNBVvdf5lM8=s_T z4X^4<_h=jG0J3SzGlhD*k_?a#{H#G}uKajusBX7Hu^{hK_;w+JCzBhass1B*&5-)p zBjJ5V8}|21mn(<5wW{;F_2Tg38C3N*WhJpoJ!EIr7wuGMZB%Q?Dy6Y|KQAxC5n|zp zo3EKi6ISP#G)EKg$z}NLAlzcRr|m(pQxuv38>ol+1&DN98jV?`7``Y!pgHtr0Sa*z zCcVrg$X9z)aqH^ps(v>Q>ROiAJXBb@+v9G#6C*clp#YTPfJXdkUg|-34j(EgU}(!4 zleRqCEgwv^6mEXrka0Bb#w6bppYxD#>|4HiD0g!GaI!x?s{DyZd&tD*ckpJ>I{-8+ zX`3ZKN&w4r6p+>%_9#geuvu*Db|lqHv{GnOV0KK_no%&j0(Fb|*3~SCN)~P`b(6^6_&s%V<9CdFpTSn!Irn&|V)N`Ete~0>5gbgoZ4U0QM zsZXD`G`$T1nkO)rPeRyzw(D7wn%oUG9%O9XGjP4M={Z9|JNk33(6w3uZ<{&t+Z-oI zV+)Ra z3FtZGI#Tw-2*G{jNE%s46nw+zc*15>=rO9cKz8?$U+91PcrG(ex#SDRIuRSJFA58J z1o=a+KZD&HXwbpH#YpiT^0?}}-9^@Z=t95ZL67(l7aT12jT(ooCzKK)D6>?ZlVSw0 zmz{OvlMYWKzN-|Xt(~~@5XzV8DYhPx{o1{}=H=u2YAuq-W}C@{cP4~h<63>t*$Dva zsUPpA?F57C_kwy@W*cxVXAPnU4oxa&jjQ!Gy~)oiu2U(FP98+Zg8?V`$UvNSFeqt= zojz?zoCH6A-Hl0oR})1_U~MeG!pShw{a4ScR89*XqkJ=Vfq1pa_$V!n0WHF}>g}ZI zWV>-GC0;aXT}ca|qANp^pQc)d2UmaOrY6$epFDe(l4uAs6MDf@B z4B?Yo4k&%8gyw z-m*IGV9nO@VIY5*=x%`bHo1g3H>#XZARSG(Bh%!jpQc6n)zYowVarHm+Q`6oZ0P-n zt$y6)Hio14JcYAWVKz>-aP5<*>~vrj-h&gVXi_qL&?VbqMPu)ux9#7UqryeeO412Yix= z$t(iB^JS*G7}h1Z^TozxAo);PZm1%?(tY}kHnXlwv(V=o1WKW$Z|Wh(7=LN_do$#R z&S&;*Z*=aSZn)^6Oomy5{P6K(6vy)4p`CEGWr03L@AY!Dy&fQ*;`+XBI5^C0>gb0I z1Fh^VXJenaqH}Bap1LVpn3ZsU_(*4VDjGTJU#zb=cj~5AiQ(??maAM{cgT!>5N?Fo zI9I8y2aPQb@R_q|NwIr4&3-f}vtlN0BhfYp{_DW@UZ+}~!E9)t*gH76@LHK)7AJ=r zy1u0Kv3)9W%prALG}3#*lI{1tF(~nk7 zoOpVV*?<1od-;eok1eY{V*0uKwxu}}jry@MD*3b%y+%hQ#0n{bi-k6A?H=&B{l>=i7&eIu`jTXo-5Y1YZv=vQl4qjD=&mAaD4~;kpX$Oa%!kU z)}lW{*&RNuGkrt9P`Fp>LQ(1o#lCpav4I8X=mM@!pKgHWoLV5OUof*;Gqoj|r<&k2 zB^V=oH)}>q=&>ooEtcGizETwyr-akXSL8xcHBY2k7I~bfzrsqz^8=Tq=Eq~rd}(n2 zgyd>bD~4P9kpK7XX!v9Ljj&?H@31C7gt7iK(Yj`iRg;aJf-2vtwMk~_wGRdsQsK5% 
ziP|NWcEiMdTb#BGq=BxpN~H~i?vqr#1DaEK9j*a1bfjb7#1%fQ{bzG_rQafR0d&-yy1n+;ZXt~VI%_r86Ht8lkx=K90o$G!Vw4=&;k zY-{biZe#cGS3y$T>G=$G?K+R9&)e}ue;u;o?BV@VHiCv|BZDfE_es%13~d==^9myjY7guWsB~9IS1f(+b4ks6cVBVH z<2S}d)Ww;HXRZu_FbV*(v1nl%mcK(sI*}JX)5af$oho|cpEkFa@`fGd_yqCc@h3%h z3*?UgQuj@EylyR-iL`D`nDPsDxdr{8W=ogpn-VO1r9sqCLnunBegNk?T&)r$v!&!% z3r@t83Gg4%)L{d*hZ;JJA&FQfYQS+!8fntMD;p$b)Nj)B@x>P(zApx>VS3CFU;HRb z?eQ&W_g;1%34cC*MYMf}wfA0k=5uE5$FSA+nA2A%zj^VFPAc3XPRQ*GmSjZ3AD;o^ z?d~=zARc^Ss^|_LN?&9!eRV7ia?FpQ4K*A_(6Ger zpjO1s3!YJ0oRMS1PV%Vl*1Fnw=R54kojR8%(#(jS>alHDnXnA_Rd6ap67{mz*uXnx z8DIx>_m#MS!gJoD99RjW$bwL106Z5D`s8m`AGko3_2ZJDXBB7k^b2*)15w@s5&i@m zE>(EWuc>*cV}pJ%w=MBTaBtb?v2CSzF{r?K=X}dOM^W9RiR**d1)}Yp;uTFKu-tD zg89a+b@vz>*D{Fe ziQ={AbX%lE{aV^GPVEzz&A{*xU1v#i;i$W~n;=^}8Kj4cs9DRw#PwlfAjmVZ zK!$Gon%+r*eqn`UU?Q67`S(=i3^c?mcT|930VNc?wILl$RrjQhe;2yKwUFBq0 zRg~Yp?&Z13{PbPhk6b(M2((PMWSi2Y4S?91S!94Q5_x7%F%AgcAweR zC>KT~ef9m>@p+!hw<>UOqJX-glrs5ej|EbV#qf5f`OI5R_1J`Y!7rW&L3RPdF4M}W zu6Saad8~{DI`ZjmGUH?QHsXvhPBBcX%jcf%7&X1m_RQ^^UnTRdB-vg|AuQqm(&HwV zW1ZXJ*VL_fRGTxw!3IhIbcEA&0rn_~rfu?MX^+`&GCOoqI%JZ&-GrNwNQ_R;C{h6d zF47N$vWfe4xdz#zs;uvmn&0> zrc3|Ewb$G}LOF-hBLu_G|Iv+^!FO3S4BnD*u;eP-+mJwQPHu-kt2}rLD(Uk z3s&=^|3$9IiPC0%ue8Vx54CG=+tqt37{($JPxn>YK z2oGPIp`-q~9^i}>AOi_h>hTnz=^?PmWWpCmQyiCAN@-$Aiw#{)8JY;EQQjT3x_(#< zFB6+CO^ZD&m%}I7?EzoqhOc6stO^|7hMykOd|?95=1R9RC}su81su6iyH1YuE<8|& z2RyR44+lgs#oQF>U4$B~7=I5Oy;SdPQ+%MtoY<8M=zSSgLgv(Ck^}Z}t^*-fX`?qc1&RH!*}Xq$7aD6$$0eog zhektX2&bd|a_9$deYoFnmHKRKjDAKL49PZuz!*t;OLVh)Jm`Dk9gchJ7yFR))K%0LX8C_b}%r9Fl2CXw%`^n;!YJfYf$Q5N_!`_`@0DLQlQKMps#5?8PFa<(NIeG*y2~Z_ zr%aL5_%aFNmlU*^o#D(_*0S#@($2tKLh{2-@W-x)IhE>$p2IVR?st^wufTo^NKWyE zIQfaaH9gi)QsPbQyld+AjGl~$CkTId;}T~M41Q|kHH)KtQ~3$HXdbd;rP=M)mDEUg zJJVx+^CM9$q{}qkNN%roIpE#{blCuVa|;wiH$FbH3aPt!lmm~@*>2Cj8~ljMe-sw4 zDNezV0%`j&2dz5Pj`CCMt6-#xdX9?EO&C7#A0A_T>wyuf=}WX*exR_X2Gj_DmQkOyCiF3mDK6V`M$M<9KY#RY4`>S{n2dZ&9f;y%dR zgH5>Le<_I@T3WPVIJ{%gIxuzXR(Z-=_?_HN{f*Y;;2!Q=z8PPTJ`j1pmMF_b*7eE? 
z`Lq3dR4DdNMgBOO@ypSKwlWMaIcoJB#b>=$Z?o7vhvz&S;&HEd)70}nuo*KFb;iXTnlM#Xj2^dl`mBl8=5hE_&?CQFrlu;5eU-_DgKQSs(^rX9ZH; zu0{hNg=0h?ybat2hRVm>(vcmSQWby8l!~ZB+>}O}%+tyYtD|s3Z>OYNnB-xtRI2Oyhu#2hVl7a-Xvl3qk!|=_KIT8Yae1qq$sSgH1IZMr z2$tnmckAMkGXO1<`*xu|+nI&ll{_JReB7C(J)4-6=DOFb_^UkVpX5)xW)}R*a#_9W zmi{QD%Sm2=+qOd+@)=XsP1Gb8TpBO*;wKBQxNzk4XHj`Zh6R@%hmCo`-IuC&U#kUM0OMOU8aqk zfMz-F3 z1H0=>?oyh;^}p?AvLGGkd#8d%#)8)bLJ}LdUII0=3=0`3Pv~&LqzSHi<=C@O7ozUdNri)fC_G z_JO+?;F2MdG2kQ<$SG+v487$=Gam+Wm6PPCepoyN0g6Sgx<%Tn?b)Z*^P@+%yom!! zuep7}86{vUowX*=39;Pr*mug*QcWWGshQpaikzM;MU86=*LB8IlB4E_e)^1;Wu5!1 zqg3gdJSl~x@isbgPbbHBnM*s$h5rPZ1maxGNDQ%rTNRSBs>1qpGtQjVRiuI+zGAM|iJ2^m6%&+prW(IPHnyUc>I~Q9??yJISQ~ zaimd5?S2JL^G&qVidUcrp78i7Z0(#rVSo|D!CIF-!k`T2NKj2oj4+>h;z~VcPyNRp z<=Bt1&-Go&i39zjHKW|t_Q&e4@=fZiHoAL^+;Z+Jl#0^0Q@r%xszs z2j@8&T4^d~;QOs}I2_JQR;$y?v;kwgt74O;{igJL#EiZ(e~3ykA2ygj21&&=IRiHa{sewyrFDGET1F#!S0sQ>5ysIaZSL zM}r+;+Ivh(7^Nw)i=M^dU>g6KSydT)qU2g&$;($0RAeKjxcFL3`t2INY9*Y^r|P%d z?qL4w-D(B6Mp=ypM^fBS0a0xDDnZ@K-;*z=l3gN=m9~uIB=QQ*8j;%bg&sb2^ZO{F z{=@@4stjpD^MD)1W-8fnwBuQ+t;pQl5VlXz8#W`$ZpiLx-9@zSKB_R_p!fDvV0YeS z;C41XNBa@gQUAKhU~zz?9M|)&0^zNiw?^yA9P*I+HnAPH+4(@>MxBjEPha{d!>%K;H1RfvSle5z_}Lw#Bmm9|9$hDG{mlczXaJmJ>BLXR!teao)U09$qnyH9=g`@oF{SSi_` z&UXY=Ac2k`Exd#+riOC!M*7)4TYA>RZyj3B`)gBn z*0i$xb=`PMFwZ`>&}S2;q^+S=%G8i9X|^0~ytV1zlp8-0F}!q)r0C@JUXDBsYQ=D= z9(m4iJ+9y$+6JOtzSksnZNe)+f?vD8KIoI8?>~aTs3Mh(&Go4Vm3vIF+ zaZNZ>@gX9DVlRRJ&8G>MPc~3zwyzoqSp;i5qVlKh5kJf-pv6a=47x=T6i2S-+}vg& z)XECzCm3e3b#hOdR->H~%=*pw<`&P|r|`F1fcHV`2SI!HR`8^|_if|yeVKGNyQhcq zz_bYjv^^lyE$A$GI46SnV7fTzrhQXwn*NXQ!Rp278{2;n&b%{ec{S<#5odHi@EdhPl)piCTEDUd4SCYvyu)}w-&t^5g@2!l; z3US1Sd&A19_ln0%{BgrO9Q0BA@X;eo9#UKrwzN)_mjtwJEfZ`2CzJa-c&SSPJ=5oj z+f+y^2w6oHqopc9GIm=TV@c-81)BDQY8yp%2-9LPKP;4A6jR%jG-jp>7wtK!+-DI* zcWTl@tT2D3aslM@= z^T>g=VU!z5+-&Y{j~mkr7uOj_H*(I63(oJQJsJ&;M5tNO@5$eEy^F#4OO?mmPzMt6 ztNT0F6yDgx7N*RRsKBV4J4JnKOH7}VS15mbMG)*qK=x=G>X+P-*@T=A#7F%2rH(V2 z5z09iEZFXXkNX8o*aR;&ey%)HB8 
z-*?sS)C4nZbq90l&ixRz&XmhpoX0=tGyX)N+XmR`Y2#r`c9ypU*X`}5x8LbgtZvkZ zLZ%HKMqHaNqEELmwwC2@)f50-NUsUn(=*%%OrkvhPip}cu~~Q<@-i1h1*a4P7K8(K zNv*TEjPJV!Zgci+=8Xk{WF`r;dBR}OgL4!g3RCE4wcO1iJ`%Jr3xw+x4V$orCZx1H zgBLHzmZu<>{pr^xgN&S>pU*ZyX9Kg+`9S|zNB;kBF%`#&0 zb7sktZNW!_j1n}83%)a&iy8~P>5;RQLC*HH&89J0YJnlmub$s8;Ek-+zU0|Ed4khK zu_~VLxT6_23C2Z@b1b*DYfr%6l7kVHy(Kb-$y;t^MyNCm7_-P>)wpnqwc`FoO%3hZ z-?gUadYDb@rLBfp(-(9{6E1esm=^OWdpnlooHE~`u1(aUf0!PECQ1S+c3fZo#((YM zM%_7mb_@$d|Lbt_yrD}19GZ_$bnfP0EnI-#*H*M}b^27u$>mP_@xdj-ZR6%)SI-3v zS~jO}iI^dGoqc9#cD@-!bW+T1_i>ZU2=kg);pP_&2$Hy$Y!6Bw!k#au;rfZA_A{gj zR7<6{lkGUXUp*#-FP?x**Z+l;GL3m>Tcof)uX!l6X6i?S@b@(0UUtAwp*u0DJ|1~w z2_GX*@3CkJ_$bv=Al*oZ_wdDoBg(G{#rG4Y+4|igSf`!8Tw&WI1k=duXy6!s@=+uZ zrrlj$BJcm+pTX=&%scTX+SbG_fr&Ni50cy~UiaO&0f85SGmpN~?fT0P2Z9>f*XK^& zsLfS}DM#}k0z-jNKvXX2%t~{@TyvycwE0*b@q?#I>@nLD0cc&w=;rIV9walH*bBxS zK}g~|ay&cFeR~}%-s&x>ix3g>P^&%fWs4M6f-@81^?L?KkRkg>VZP!L@iPI+HXv=0 zMt7zk*+}*seO8qNv-b8L>}+q}LRM%tvLDWVj`JZbC``C(AC;I<1M98PI(;@Fe}|}q z0RNf)K14B_NkMv=DTwHL7)4B~@j6VGKMkdm?0JanS=Uf1!TDGlp_*oeMp61(vsmXr_{UM`{MST#2qC#~MkT zcrS{Tpax4)1Bxrj%z3$?R-H$q!*>dL@Gp-x$66ejwfH+pTH&G7*3FBKP1ioGM|{0u z{E3@=bAtfJB?qlByD0!yz*#Y*E@B-OpJwJqD#JnSSM?*_4FqW~u=b7gK+iyGP9Wp5 z=u@fuiSn>|Q!f;`C0^Xy?gAe#5gvxApbT`|Se16#%wSavzwaq38tmFPd;*N87xoxk z?=@LT!X%MfJcZ!#<3d8T5^RgF3zxdi3<#K>(tP}WN}iso$fO_2O|FA3O|oIZ-wC`S z3=M5qVDv1z-H94M!%o*s^4@Vleik)6U= zM}qB!i((rjCF>uvCqg<1s4_+xp%zgAbsmdbrCMhvVqy~a$KhN_9*?t#@IX!Ab&*A( zXgM!MwHOtIjt79jZ;HKv#FqQ38J0tr#457vBl;~%GOgex9gZ-2v9Ji_vz^hDLtt;N z<~BWR-gU7Kg1ty4+HY~;SDQPE zTzXm?)bO{n=Q#fH8Y@QRgeSx6A+7E)S9~?sS3DtSL=L}8601uBt9zjz#iEZWMWnp{ z#x<YiY&@ z=@i=K*3y5zlH;5IK5QkNNt<6tugHAz$LH->DqlvfoyRWme;VeH&LbeftXsaROth^+ z+~-dH*%kj>fr5z%FDkYc07yfM`rfH%%v)=%p4V%z$`gK>8U2ONlv%}?Sk8d(VAybD zQaUW4U)8HCk&{H%x%IsC0`6l2Xzo3Me8H z!yui4bjQ#jAR!<~NH<6gokN3kNq6_q!_drr{r%TkAJ$rX9eeHL@B`m{-*=wpzOU;% z^G8G>n^tlm%|t#m1cqfhw((;CUg>38P+lXb`D_wI-yVWtP=>J~m&Q%x>OGnckRPDAE_)sHM3RHXtL6af(QX=K2u- zplBS*=u^aSG7QDduojtQm3^CiSmb!O 
zi1N5W9OvWJe^#bYinvcYhx=Xq8jLf?+P@NIuFM1OZS}s+hzE=nnbp7h%?dN zjCC6`mJ~4~Cw`y55~gCbtU*N4lI!e>)AK?JNy*5e6cN*JgX2KH+ElcwlzY8M_DjPn z2c=SFp3Yop{8}BODx%J}<8q0QYCN#dUq6n^U(MD62)>;(5Yv}Z|O@WFC$nT zV>qn&tN<&AseaErrUNAjEQrm;AY|yO>wZW}QNWUYmXvs)9V{X^DqQnd+ON*ZRtQ#t z?Z5-Sx6DT-bwQNp5jFnq%Du%!2Mi?hMbcI|z7_?(-bX52N4pDVK_g$}Cyw3-Je2J^ zl1pI4@uuH2isQ3FxQUbuS0l-PcYPmhle=h*;DU}vxS%fzk6bm;af1eXR1lSqqo%6V5_XhW9D+ z&jw2~RQ5RpM;XijAj~nYFy7_0!#ONCI$6&z=Pp%fD(ei=XEu3 z0*F(7reB!;yHv;Dzc)^S-oHcpjh;^%wss3oCOU>3O_>d@+_lS4Z?>)Nlo;OaLGEm4 zi6Xwocjmj5MNW78PYYnmYR-&{gne@4Tz6Ir#SX=|i~pA6eQD^>=%4QK53d6a%pQ_I zKKr7n>?u^lCdB#X=NZk$_=DFV0|(!-w%0@A{^KnAAA_2#Szv)tMd692wDqoIH#()1 zSHGWjW@ioE#vK|5DA_&C4GlwBE0sqh?shU4$4Bd98fJa0%< z3JENHYFe1TRyb}!!jJTfE^(&}&{_+|B7w!(-u- zFDm%6m_&GnD>H$G7jp372fHz`Y8z!*Y(R$SlpJhhvQ!op*bMUCl_~|r(@^j80OD9A zBf&L_Y;Y*&hy9bbU+qF86z{(Q z>9kDEArqD!BFoZWYB$+yeQGM$Xc1s!_grkSkY-1Fj7Rr*=+Vz-D$3ZXB&KE-sk=Ip zlr!G7%}eVeJAuj}tHWv2@(p9`?&E*4UKgt4UX??zC*1mO8vyo4e1OwI(oioL8#)8u zRrrr-Vf~09wA!*^{hoIjuI3|Fmk_>a1gLxPy{bBZyWZ|M#+*%vdX8MY`>d=dDll z^;fnIpUCSl%^nS=cO&g!#1q{NIpFTiX>omuVK?=f3bG*n)ztUi!h227YdsHm`2GcU zt_A?A383f@MH~m6Ob_sO{27X7p+93uMy^-eFFwf#mWh*HqTXH=&$AMFoVWd3Th8G3 z#`n=PaPfk0puT;~HCx(9%PzbuX_%`<`X$fVc%zMJUfCGP<7&>K7}#_Cb$U4UGpXIaj3yK7Bsm_ir z_wa1ny^{X)=9$hPORvY>aIEolyZ2EIG2a~F5gO*3xM{#UMGZ7uAp0W|p$)JzD)|ou z`cJ^rR^<)RPOTfhTh2}8hcitZK75jgcGVGI1~ry-=Kfm`Z`kr_4EJ1;enuluT zICo_O(1qGN*6?m?>vr6lKNzAal!#tEE(vTgV>DASQ5A^TU=vkGkZ?~dm+2pMRkYp` zN>|OdcwBKAG~mo^)(2j|x1~fcSB_S*BSyL(sY?5QoCvn;Xj;oX&1(gh-`=htX~rZP zm*KkLePRY4SGqJAzLZ1T+EA74v8F$gBkNU#(=KH`2#hrQA&>*k_1~J6{VcK9FtFXr z=Bb2}%usa|g~=iRZ__`l3&gTNdefE&M1b5k z?`o^>L+72@G9Zb*dI-NarF->VqR0OBe|ADki7|S9wZfN-neHB;d~Qe`yD-^1R4&{a zwB&LYWyKA;j!@vqLPXR3%gEqi?)-ut@(9nEA>5D8XK|2}RlGO^SG319MlrpzKk{j6 z1(^|$vqm8C%=cL|F|qaINu$0=Ymds^aD?HJPY!(h4k8s?@s2UAqOomp^EWxSYAm}# zc^9K~w#1w)_op0bS)Yhj$yLm0Q7e|J*LNj~cV;9B%zkfQU3kHBTClTPIQGZH-R%cg zMyM9U<)ft^pzkV1Rw>Z;MLcbXbjz0th?_jVm7mM0Uv=&4X`@!zHxmhGZ61y#k^V?h 
zrvKgqNj?5%-+H@xFNOf1g0k*4yEL6Y2*o$@oVW9w3uJkJ4QGU=|In>U95R}+-|f`F zt+Q#ig0wHT8w4~bdom$gtQ_R$qV1OWJaf8Pe^5vkE+7z53@?taGA-u(@cOZvN33_t zFEVhNt@F_6NZUVR2+7^;RNMcQU($aKvj6^f*p?#v>&NLa$8?Wf*NGdg@hKB#SEoO? zGTk$oPUK5-NPq;7-#RbL$=pHW>w`)XcH1D)crQSPK8$>jNw_3uYyVoj2TtjA0`T0s zLZx`Iedx(ovrS!}thlm1JZ7auvQiQ-(>%zI`Ih}d8PGHIco#=5c?2zV=`{roTk88V zwv3i#$G&|RpB3)y&}Qe{VMS;(^Qo=807uv8@$e}At1$BVoE)F>dAxF92U?T0xi_os zOk(=T*|IvHU*KiLRPW>DgD>w@hO)FWUVlZ^#Jn?ks-}ez!R2~Yu)}}fcf}oo^{D(( z(TMQ-fVU?fF;a&bB#1Dt=kkZ#NiNc=eQND&f8NIr46S<}l14Gdq-w-)&j(eNW4jj- zt1@F`EX%ufGC8o4IfMntu?J!*F0-^cCFOTwHcjn~6M25|WUA&oE-3LIWA3$1g>^1E zEZ4?Q!Qhn7=-SJ|=~t(s@TQiR(47YjS`2wiR9;?TrgXKGi)~{h;{HO{B{_#3%jjy% z3%w>9X^Y4EQJ=!>>(^l^AB#0d?WRBWZvOUb%!_th-nND=f2VW!V_a7{w@gt`8frTp zBS}|J+O*S9>3J5jS2tlH_OAQroSBxOL)B+WcmrWiR~1~y2Vg*c16jKhUMF;J%WR6} zKiHIQmS%FUbQ^eAYLAqDB-19Ex3FJ-PyV!9Av;2XQFSF(Mn9a-_W%vVB3sP=!FA43 zmLsuRu88$KmOfGYo|2noOGj!70HD9Z?R5_J&famjH)qKEXHgClDsHs?N8ut;jSXne z5D}7qGxs1Ow;H%!l;yExvL1(Kql%2?MCMKuVf1$E}EW)huT}$^#LWo2*Iyv)Vb`m8DxwU={_zZrK zp-ZOzh#ezqodQ*CnQWu??f>(9|NL){_kkIZZn1~o>xdpu_Q71mv=DA{U5ts91}DY- zd@9!dkWZma{XT{JCgavLvMQhaN_)g@03fkn>I1HuybH5Yn*!5uQ`)DA5G(~ZIxJR^ zCvvTU3XF9{WJoXUQ-PHswrmdbhnAFr)a%}hZa9l$w3O`0+Nn0CU#v8B?tgi}^xxF! 
z%NNH41ffPw$Wn3k914ssroq9sU%6}wx==F&4hLAvm{Atb)CQ}`->c*-rOP#lh3J(< zDm)AImJG0x3^52v4QC}^Pr|+bbw%GC`Wd%6RXBO4t%IU5fFBcn?WGH{)xPBk=m5Fz zQuAa1i+YXnOC4%d^!WWwjn-_hjOR#*qUNMy5xb1JyBXCgCvqh!ah2GE9N&WszmfcD z=qnql*|3kJ0p;!FZnD;%>HY>@t(DF-rTyZPe+}uj2zSg07lf17L4|of`%}h;;WWo( zD4gxI^VGXe6`8U$R~x-CucLSp`nw+y*UV0(&2}=|vrj1h);$l9_T%Gjm=NG#7ueMfoU!K>clxdZV zPPIM!HNRxZAUG-cC;a(DtnquT(+P;K?(of4{|SD z9CmXq!(Ts@(VG%0uk>OIoyFoFG3w~Bi!W9^eI8~1Qx+$U+f*7rdZ=18)Jpo()VRHb zEIQVd;QFR<#Uupqb<$xr(dJGToY&)q!0i(4LjK^M?ccX>-}$_lp#Y#UzeznkM@1w; zLh6&<@jK^6T0=v{D%+o^5!(!3M&kNxU<4YF#{P(j{A<|vY>apNg#nLIrPjxoL@sO` z#LEhyC7l!M=BDZ)ee~v63e0Z?H;0FEzB}%~Pku3};@<0=P zJ!?;x^J~PT`=yK7((tCfQt&u95Fb|xAJA@bh060*YS5EQ^b*C?>k~t70II+k#*Ffm zpsefu8Q2{59o4RNBLD*6I0)$l{dp^t#PMBcM#>eOyP34oJ%8z<<$~~75;Ypxg^0r)ve)4Fo^c=n#}a23V_HknB3aZ*HVV)@Kw_Pf zR-$ko68QTAqcZqeNdUoz)|F=nw4Y2{)BPDR6VgXn8=TQ1T5b+6fzC!S7helao!%)2 zW9xjNjpR3*!DyYRYct*`1(pCS0sj*d(Af~E@!;Vz3-9 ziz#ba`=?u7zm_53;X1wRQL8rPJNXj8Teo9wIZ!@rDJ2LQG@6!xn@4ULm(2)sg%i=i_kC>t zkrMG`V)`03m8Q<{gzWsmdH3K^D^M!fCpx{}X9@;mHPhFqzkSUgKVnq* zvm12Y?Djm!oBNy4`RZ>gMrGCss^2oa$~97o!b1ESd?qC^ZGdzui}=EHF$4YWh`wBA z=|1YB1k{Lt0J!TJ*J*vnQMu)A8jITpKunNJsx^cx|3;LScm*|(@pHZLvg@29{|QI# ztHRHw9NnP8E;#jw;_mFn?!SYg2^I>wvuYihJP{90m7epQ@ck-h`lU(z1OGgT<2(R- zLQLeBR&GNd*!hw>9!_w~nOQ8$cOIPiIh|y*`A9??qim@RMX;m)tB1^uu$S(0Jz$92 z_q@ZR);X19}JB|^l>mu+~VqoezahOk4=B9)emOH8{I`Ap0r9zkk z1AwbL-`R}+?K6#<|6OqRw$lFl=i~W0TWw53VpOA$M-#|v+TUP^tAd#JA3w!>)8ez?>2>NIZ(m_*!@R{QdO1o$qy)o(yM0oZc1uzYF8P zecg~m=yeY`;)RvTssU(k#AZSO4C7M>OmA|4oIt|_vzZoYK0`Moq-GhFBUJTm0r_O@ z;EQN1K`#&ngdDuvw9}~}GCiO6DG;~vue~HH33v&7yokgi9v-|vFlDVL=hyON1Vq2# z^w_;TYvI%bWq5Ciu#lrf(t58^xy z4CXNO!XXA|TLYj8Tq630qIZQWS zF`6FvLg~|F3OsCwd2cqF?H?ZxkQ4pc?=~DQ(Ra6QxylZGY7*R9QlXAc%aNY4h4AYS z8hr`Bp0gxo4Su}$o8!EpQg$gX>}7&Q55M4CTMj9JXK59Cka2@#{||wWV!~W-=B`&! 
zI?&YDTZp$6@lf)7gVm+gu?ruP>9wXlNy+e6@xWD=QpUVfO zssYqU>#_y~6SALR+_LwANZ`37w7DxXAVvxF0?;b(Mj1|XQIR{&j`Tm~kA4ot&Urlo z^<8{Xy66 z7_^xj$YDhmC>Db}l7Ha?1CIQ?H!d|l?wZ3MC<%djIJ$(Ca{~jLp_)PA?~>XajKcE^ z%wKQ>0ENf_ibg0!ndbIsC5>p6phLXG@zdgO+eOD4R7TUQyz)Tg6#%eK76lXt%?ywe z!tLCCpyUUFa%QT-hU0`m5wI}hlW!Y<@u~)!ec~i0r$sz_RZXl69Ftg3GcBn!Pk_*W zEv_Law5RwKq40F=)x?f4MGG_%Yx@Eq5`iyHSM*)Q;c3AC)%7)hh;`jOh=OuI%mZ<07!{qfXD&mk%hLl=qI)i>o?+Y<9q*GyL zztc8RinqsgiQoMRy#p)bappr()#B{3Eah}PpI937b1pz4wN_JZHO^Inv)5`Q7 z{B4|qbtSf-YQM&1)_&tsVEDe2*#`WtuMh|CECFxxBEfa|O&jDB?3AKcd1RJH4)$I8 zhPJTy6Kq9TClp?6>kl_MHz&MV3Ki)So&i`4<#2e(qDp>k@%zyx4KtuZ9iH(asP^jr ztv)UB?|}sP-Fii4-w)-1;-pn3qzDQy8h{^PPZ|WRt}WoAGMH?_z1)M<5E=ORi!dA) zwJ(XfCmxI{AS`;DK1K2EIhu;6#Of_By?(@6fk9a?w-Y(_Vi@cB=0nTR>q);U9&gxA zmy6EDaLVypAfrTH02;|O)ogk~69Fd00{spfp3p;WhVgSyEDMufz!SZ&FQReRnb!Rv zFPhU33w{LMc#(O-D>ufe8SWuSP*$J6C4CCnw$%y=;>$~UO&o}MX6+iiQ#=j!c+r{` ztde4;A45x5)z-dnPvtrOey_{Qu|>s~-r(;ki{)aV@`fifW)DoOSh*-#I`8?zpx>I5 z@$o1y5(W)Jnn7jYJp0y;M8xP)M%Jli&GYT5R262q?G!E=i0qLS*Uo@g3jrzwtDlpx z4Ig@LT^eAJ{8G!!p5n5S*dK5a!$>?A@o^R|@5w{^eD3xB0WbUqhQx@vic640E5z4p#8je<0pok0?Pv5(uXJt1@{VP6U7;_Wt#mgX2C z%}9(&QJG#;z3^W?`Wo2ma(9yn32IiKEzShkbzbkI_EVXb-;O*HajJOnhw%BJ*3MnA z7;>kf{PjqzIrIZ}^bauwlgM-?7kJtaf&4vB?nuoFQS}~e={FbkZm`h!a{_lyhHNiz ziP4eHnKuKf%zugEqS)rDCc_q^*+iO@0OQQbKsB_P;Bt;U>p~4igUwY>2XIjV+jMzG zl$xNTcZr`vj$WdB^S(Yn-&GU%FF@%nm#;YS+*Za(t&ruN-=F{Fm-;7$W3Kq3{te7* zEpUP`;b^R*wlXGD>6P;^U% z3UdNS_Cj(HpA>5*IGa{9FaYzn77LdnQ6tTSK(4UAj`^HcO2TGkdaWJP3MXOL=DVe& zm*u)Yb<%;-BykO#spb%Ldm`C!QZM>AnkCgpkvuXO=%>CgQ*AR=X5JZWU$r-#k>MPj z{yRcB6#&y};2uqkj?3Tm>6ra;urTUrGuB|0@Z!~W!_TlMDMU{wjlcczhcu9S%Wq^p zA}=f7c&FToB}a_Hg_B%%qoh}uHvH{a0SJgeb9f@l(K|L+%F;;C$I)?gCn{5euj?0` zsi==UMuFr?+go@0jt6y;;ifP@3>o+yFTLZgiYT(9*s=EtvKm8J-%J~-?zMvhS#_UY z&N?qT-~+Nt5*mQJG8B6z4OwjXQAQJd#<=I~C+6bn-AU1ENILLmh=4;KJ!MKaL%NX9 zXG&^BEnuAlLT`fJ4FdF~%@Lz1d0#i& z!5c~HE)VJFnRVkF*&2L*T^QrKg+7}Zgry5oa7Q0gDF_cv5o)K>{;Dz!LICn2pIar~TDdIM=w;2%Xi7d97@^9bsL(WLT2TmgDS)knxfj{ni_mr295 
zJsa`RrLDe~G5ya9#N5iF_m-;X-|o7$%C6-~;hN5o3hq8#_q_F}^b&T_{N@(ei!0q% z`Sl;6t7#9*PWFlnaGN_FX3phitKaDpUsD5-h2u)%kM7-b*0>_4$t-c876S%xnnA4c z-3nqYuI9Q<99ZWAnV}3N_Hv;m1^beyFcSjStK7DwT^&Z}@3H=@n9T=8v@W*UY(d;S zL+_2k{L+Wwp*gOYV#>BY)o5H`WAd<1T$6&YbBd;~X;Z7+fH>0XW8!=ZF}MQCR;Hp; zDh>hUfc;Bfaa~wbcZ+|mTAL-jKPvuJ?9fot19q(toDbkymS;{^;GhJiJ>kxDYEi*t z;kYZr9u;4z2vfLOY-mZ0dgBv0@RS`QCCmKzT0?PtaI~yP#go!wB`5RTKFPwyq>`t< zQ1UBpH|i^~#^JN{kC7000Pr??2e|+PTt}|SI?USXu?xzW>GH^RoX0z^)EF@98l2v!R;_CYn%v4~iDAt>DY4FBZmV z4dApnM}xe6Pk$LI=r|D4Q)5tbm&e2e9FuwMJyS563^Tk1^hi=PhpIx;={vqtNq&pF zXf&oM2PTffhC6z`d)MH7njx%4Tj^HvQsFZ-9izs}UM;-pZ7VRoC{Ti;w%crXRt*AkSGRnG>o9E{6C!`i|QE zu)vnv>jY)SgmsCddM|7wuB1WaXKLiuMnqQMtB0mn*!jv|1``C6V4J`xv9N!M92ycM zR_E>s#f}&vNJb8-RXlLR@e&3Wz(HD8p&1L+ASlk^UjB`w?1h)V91jDl56SQMs9hsS zjn6387%>r)=9pHN?~{|u`FV})<744KgB(CY$hb+i%DK|A>x{qSYKessbKGB~+nDoS z!D1Rce5x^mC#5^`Yr7IaobE-y(4jU9|DeosyQJxOVOI-5(C)z6oZpl z(T5bh#qvNpb7peoPEy>5!eCEg|Ga+YlCTHbeMdt!Ac{}w=1(H~qsB5hp3Dj#+rYb$y z;vlo}(|X!p1*hY}myCo=9Fpjt;Pv@_eZuH*$J-d?W;18j{YT}cO>GE2;P8p@PaOZ@8xjcP7uYbHCvDTuEvPk2j2G9%+jOX>xz#Kx?KC zjm8&qV%hC9G|1IPmtqdgMsdf|XwhcT62Fj`{Wo8DnB7^{*l~^F z?L$NKCf~|~&>DktG*k2LcYAW93xKMQOOgVJ$<33l%+iK{nsl{d^sL5 z^v`iyWa=!JXEXh#EG%(`9!v1mNVvCx92m2ivrGupk>6^6?dyrnXuqq2z>cPhm7Qk_X+T}v%GLt82X0vxv|S@=E?_rK%@djM!X z5-4Pi?s1!bK$AjDaRQB?XM?zE37B+GOBMiN5FA74tBGg7)OA?Z^i_x=h!{W|d2309 zTVp9UIf6@V9%A3sg}%KL;0&r5nuyT0gv6Y6jpiiqlqVKkZBGLul1aD*tNO1VESL%# zt7YD!q*6c)Asu*VNmx7T!PLUpHgG&?$*@5iz+0nmshoiS^%BIr`bp_CEX!(&6WmU) z=d@KOjiIv)@`M-+tXPYMz?DJJhI6)le1(pUkIMl{WWFJSD)O{}dHj(`zq$GB? 
zX_m4xMvycDmc{Aa8CU-1*^fRUe%0*?4Zci`0pkEJI_2k1ItI#7&*h!P3y7fIdYki{ z##!l#VviJs5RZEt^LFN%Zx8jAHZK-C;6X&WY~2?D{%YmCi)lDD$KRfB^uXl=U^B$yZKFe|5fZ zh*rRKD@ccO+Gs^=ILLTxk1O&!f92IY*mJq@23RTC;ruI`0;Y>y!<*q!)Uh zjip&;Q4gao7~C)Ur@7bnl=%`&C6MK;qxyOsfB#l?VG{}P(J?W}i*6L$$hRwn!1dlR zKKR?5<=5BN(EPX2F@gjd-TcTm1J5mmf+ZTupov%-y5vH=7Bwyyuui|zVwh|3=X8QP zp+Hf1RCy%k2j?Q%rB$Rz=}mLd+DpqgEB!-@0Hw#PuNmc38_`#wGH!x+T&GRSwT5S} zevxB4J0Cmv4fD0DL_xePqulxVC*N3mV;-PDmA?rje7{s7Q2 z{X5Uq49L@S)~dHy*NCq)2Ll!eH`%r3%Dnh9&lF>Q)~D*L-ZT$zbkIEJGKuHjV#8_A z|Ie_>8rNUjeg8RoW&F3DrmbRdAm;5z=yDkPepKSPowNn($YSN@UjMjV-@Sd`;O5;G zDfjkSktjArSUCaGDh5C*Lls~{`NX|j3WZ$x<_bwo{X`B7wcxFcXy^S} z-7y!@mj1LULwdP6`;T0oSpCN5{^dtpG9Rdw{39>JtHaqZ~S`ta%Wjev>A z3pJ&nW;4tt$qbz^+srQvYkx$ej42ZUl6idl9QJilFJ9U8=7G)I(lvMDm3Xae#>a^G zfa%_{g(s<=;9X!>ky64+M$*ni5Q}9db1TQ>Ek}CmVgSMvso}Ls!>2{KN$}fLn4Xp6 zb)u+4==nY1{1bm2-~arZw0aoJZU{(U&n<(v_H#mQMWx? z^Mdk)!SIumZ_E!*1LR7^2vD4OK1ENQagI6hOT1h+53SmYWh-$#T&;mptLZ_C3|h+9 zd1U=^(w5F)ZRlcsrr5scg702}M0_7~yUc_R#QGqHV|a5la!e5dmk|fu(7RHj#7@Z& zK9_Q$*Y50Jnyt2re%d`KX})-;$TU@{dbJ#nIZCFn_aR?3UpnHcY}HgJ!e6DPfYT)Kqw zWGse$8N$%U;aprwa7V0Sa3sntUN7#;eY~tiZaDqoI_92pfWzZeO^VGaZiRxzBwg*U zE|)s=RasLPv9b|pYUQaE>E{j4)n4BAU#5Hm)oEOpeJ!u3Hr+@ysv?q75zK~30V2tv z;YBO$Coc1=+k@6Xp_1DsqzPnI(@r+&jRQT`#YG>nE zM*<+>Q!P1zP+XUWqB>IN?z8Ijz&d*OwyQ=;eh8?zA23|!9n?c1`83CWy^Fg9N3Bb( zAmBqMUl>X!gR*U2zEXfx^>g}`YhF=eo$2WQ+nN>9%hj`+UIP!Yo{6K75k*u}SL)64 z*$flB>mV&hU%OEr8C#2;?g|=`6dac5N8KJ|tijl6%S8G6Vvd4zav@VZ3U}081uygm zOj_P<*MMKchDPgvEu@Iy$iRvRkvGK#n4C~y1z?WY5FZy&| z*y%h8&LR0Dt>{ha#e`x&c=iU=hg|<{Edo#s0X5M}Yq~hr-{$6-cSQXTd1|=C7sN!FBbR!!Ez@6d zcbR00eLOrg61V+AIX&}U{MB8}-@@#VoY_m3%;c&jxl-XD>Vh<%yt^2BCkz{V$nuJ$ zqM;-4#vh5Z$ApXw(?C1U(I;_I0N3c)Zz0_Z50rCMxW~_42|(Ej>YnkCcGb?5nH1@S zOm7CWisQOlk&LZLKIvwJPe9TpT1UiA_^%?V?(H5)v1__Sq-78zQ_IJ#%Qhet+{!{K zcgZ$!eD0L1!)`H!E+6?mbgGPyxr?%wLzw(O9E~p|1`)aRtukX7uzw0LsAZfV#cKw6 zf+-x@3IYs>arb}0xhCO6p0jp(&f`)}wg9(Tq+$A9-oq)FVKmU({HIyYF<&Bldo37p zr#JQg!9MZ7>fCA=&IdB?urqFE{MIxwuJGL?kkd2hX_3`Bza~02zd|&VcHi~^1i-5S 
zLqK39F1|Ba3>97}##-PR91tS4&-byVebx0@zI(N+dOnEF4`aZ933|`G;3lC*f(PFy z8wp+6ViBdmZc0_p8^GY8B6;$NBuih(IKkP}^P}+aOd3tx?mNk^Hoc)XqY?E()1jI+ zJwA3cvwp_a;fEe+Q}&q>T0NRI{y)5Ft4hUn!l?h4@^`JVmOCogMdC@0b9&+&p01cR z0>vfp!D-u9C@$&0WUS>JMJop;I1sWynBT1B0HoJ?%1Q zTYCogzeFIIb8Im82ZzWcS$$aIL0~91mX_Ln&Qy==H=R~hrs{CaL_)bMF^TzEykerh zdh?GK6)()(Gk@ZbdPbWTrFY&c(X7W4#a4e1-+06NsNLgv*sK*L;dgrRQXp9H1o}@d z_1D!ZarDdM7xDviS3lckC`OByG+@;TCi&6tg|d1>>NVEY5PI|d7zh>|p*<5FL39OY zH}I42g%0stp2SbDEBf6@wD{fUiSCS8thQeKDIYK z>Llq}3iYP*VsODol@|ZtWx)KKJe@X&%mW%zzc%H;L?UqH9&;)6{ix?yE41d2Jr3kq zr!{sHy(43A0mXFl1G4g3=jV!tURIMQ`pAf;m2w^IK$uCljE3j{+Aygd7+6of8&tN>4L66{ai3w>7^8MtXW_6Z9_sx^qt%rPf%J}Xjn#*e99Q{g@D*7Z4vs&# zfAZ}N-jHoLo0uy9RGQGDmQWQ=1Y71ZTyd!JjcIrNcCGSF8qynvFV;^Y(N$EGSQhlT z>=x)0$(#mM`Se+3%5T2V%>>+jxa*9m&i9*_b$&M@@Vf(hsx#NWcYtzFv#H5I^lt_~ z;voNj#HdG{|8IRqRuW%)eMy>p)_g7(>Z1PmvM+z5OZTRq`Uons#7#8xl1IywM@ylo zpH|AjX^|xRSQTH66=48aldEky+aG2pk%H&)WZarm%%tjxZOs$=inSw|Ryg~nco^Ue;#2y7)V2HvS1f`5A8_Zcq z4H6LyeU%e*-3vOc5u0majsHhQ@lV7gt1BDK1zy@79M$Cy&}I`{V8>Gihy+DI5Q+tJ zL9%g7E?DQZ;GT0sCJon#*4^I$|h5gG(OsowY`Tfrar=~#5m9onp1f#`t zUVa*W@@RX{fOrp;1p~Dfy@QB7n=EqtlPojJ{ydC_}R?7ej1bEqtOZ4X)P z@vDE>bAS2ncB75!I1cs~i8hEbZE@Kxx-moBqp@%38)Z<^8yEZ#~&! 
zz}~i$99f=VO6{UO@LBuj5aNUQbd}?jSl24Qvhd3yt)U2J=(U7(>iM5)u-8D&%6r@2 zbHW%v63hCmmVtBm<+g*-;Ck@L+Y9FN{g58k3eHJ0YC)G}A;GslO@tOXjO}tKW&8pP z0=21&P3@{ffXuX}SnVS}lwjYgzMf8(cL6I!6R<8;BTkiHVx*C)S(0U;RFqgztVzw; zUd9f;5<*5US$;cwzDVRj6M~xta+lZJ<*a2@x=4$QO7wf%_Nlql>^-i|x+PT@l*Zb~ zGj*`sU5~hTYE?T_>y1;9Cq|u97A78PrjpOX!=%`DO4|yY1y98yv{)VzIkuxki-5hzInK-CMD(R52tjsfCfGa*RlsR3w>!{~B+bz0UZ+gw+>uNd-8vMqht?eB}bsD<{f63 zj@pPxNmemKu{yIJKg<*Ny=&6glz{=1V};Qs^&yFBT%^t>`y!FkfqE&Vd@iP7_hExo zRgSH@QU7XJYG?4LW_y;5FTH*S1X(JP^CmPEy(ud?;93}1{gYX>n6}{eA&r_uVel7h4SxYwi=S$yk z+_L_Sj%%*!I!1oC_f2VQ0MvV-*YLglzsseav6D9&rYTP&bcq{#fi=6bP$$94W>Ufn zDW?3(fs~KZD=b3>FnTJk=BTOsx1OV1~B{aEI zUUjYGY@|CYl>V#!^2L($pzt*X%mYUnxZbw|tN{>Y*_$dvl3dSyU%&Eww6h1xmr&#~ z0|r#Z_W63ve+mEo_ZpJ_&18ptvFdq9F=O=##7{r^yc2Vwbu3D7qzRu&&rNvgE(6Vm z_WXCZoPe$AW42&%LU}qJL{lBa^pVLqw5h$My8QMvrtPa8+4B+RPKi^$vaZ(e$+pxz zk?LaHn*`c}i4G^xQx_~fQhreC0Zig!r+reRF)G<)E{sLS2}wq7@Tt1z06(EKD>#0h zhU8;$rTey`)z0oDe4~3;&4qnFiK%q_S~&G&QR4cjZt6gfTv@Q1p>#RVCz>Lz&?4^G zBaphX=LLed?GV;MODmr(O01|JVSlNZIwcFkb*EW-*KHThb*9N?Uq2&PvG~Rr9y+H@ zb-zIW=NZ)t!6poQD9P|fYFn|5Cy0=cL8ryv#glQipIcy34BOI6+2g}XSsZF)viWfB zp|8R4;%@aVXsCV->|H(Ch%31ym-+AHy14#XvPu7G7(;%U{&GDPAy-ikWa-WZHMuYZm{0i@m#$>tz*zf3TU{D#a&&JRe?Tf3r&eZRIWLkVKMqYGVx-wJU5^jhwaUcNI#ocrCF0JEKkWeuCq zQpkbmZcpFRCzFrY`E`ZiOU@%ws{3ald~P_qyYg5-6C2XmZ z%)ej?Gp*2WUz;GJ)XmwM)WKiap9OWXO_65po@UD4M2V#y&!piWkHEV>yobVfgu_AJ z`Fl7Mv1&rjZzJOcPo0m3wdKKQ!!bCgwWBolM#;6F$dFZ@rIDT1yETK~w+G+rM(Ofw zE$4O}jDhK>d@YAw07&ORP5V{Ox6NnR^-h~*cDku}mg}67rty5gial>An)O9#u0J>8 zZ%}^x;{MTkI8!^Ld*VIbDu}eO*5AsS5pXK>-PUWeL)FJ0o{t{zJb|Ps*gO76P|2J~ zQgqEFRE^>a1{)MK_9z?o)^4S*-jYpcPHM64_922qsrF1 zE36^v!A8ceBg&_&wyqKiLLBZI@f;4LkJ=hd4}YSNLH7EtEw&d##zjBdY}mrCpb7y7 zr>u~Y;Q$6RhJV)LEy8L}0~GPLJ`)R>Wx1Ly7@q09lPR7)FyHrB)!Elbrx{o3ho$E-J*$C;?7KREp zC?-MGku^qmaGK)Pe3eDvhd+mQqibB3nA!i1-q+-C2#@MzP!ip8PayhYeKv z=B}o`@CIS-^m5^!4BmfVzk-=kj1PsEyAOv6wnk(<(RVl--oM(U4mZwad64=L!YhI2 zXh+(}A6)aJ+>iIEQAy+u^y&grnW(oYY70qjQ#Vcp$-Oo@IfXkE%~Ik~54$h*E@R>;aDN}}J6hc%lPV4t=&rja{$pYIxFaml;?YJZ 
z@mhYh^!wQY!T&x>I^iTX6$J%xsiq{a|G&tUZVcb1Z25isigMP}i{^M~H;k7yN|02s zX5x>t&+;;2>|i5a)&&JheR!k@#Pp{+UKn^+q2+{Jj?}`cH$UNQTKgov819|g6;c_m zn5{`3EmS=bCC4ukIQ-*!ZFH*!J@$-mBjb}q2Vxga+t>$dMQMagq%VF;{$>7{LzaAowPT*(w@ebEfv5CF%UFBdXx^o!%5x#(TM9VQky5f+2{K z*dGl~DYrG1Vz3Gy_x5zsQB)AWNbHffK&AL`rktBXfkoMulk#tR(uj`@LS}x2I zG4`>^H6Z%o)!}tR{NxqGl;3Pz(h57Wj+b31Pj10*yzJJsFuX=wzaFEf#tzfWydvAK zXR7r#;k&o}Kk=^~u_%n4d9W4q=8fULB;Mm!`B$~mc~jF(?BbS^)M2X|p^Ivdd3VtG z5)iQpyELAOck3{`b@^uItZ%3$C+IHkUdJcl#rd_Lbd5W(btZYxplZ#!1G!F#wlh!n zAL`*oM+(dF{phA87~R={3`+qqE|g1r?97`EFJlz?0NY@#ILE3;K_{6x65z|%_#N`6~Z&F*(eyOxd>=kq=+sVV6O%gGdatwnK{!Ze;9o5Pn)(0(VT2YM+3?NVoO!mm$736j`xz zw}891dCC2lbi1`?mxYvnM_hvhk!Ev<^)z+_p&`;!Ik(oIWcWETcX9$p+V?7h@AlK* ziyVzjF!E=SXK5tJ6v}hR{Ng%~CY?T(jHp7=1UW zZxx`6v_LazbnNFc_u<_&#q*xaOj8XftbHqc4^WVC0_a5Fjw>S+9>55Hb8;r*aeYTD}r4e{eE=x})*1u3`guh>3F zn2Sw70aBt_3IEQ{EJb|3UeOLh%e%ijzB!ze(LUNayLAnq@DJD!gk8`u^a0Hf44F?1 zhYhQU@q>8tF>oM;KjH+tdMC}h=h(~_5ac@yuLm|*7Bg`B+uB=Lpq>Q9qd_Un0?v?j zzyi(WhYekpbxc~$S$DQkcYDkr31oG?RRYiVPQqSNY;b;3BteBR`C<9mFTVtZ<&l_7 zXgqQ|`!H!gR+(@xr42C22fmUULjm+P`r1qG=S{d_VEW}gY*V%;SW0fh2#&{F@uRFD z4NG=SdxwYd6VJ-@PXv}sLm?t9s!pf(PRNKJawNu52uwFj?N~|DJz%CeRSETb`3|HS*g+UmtrEei&e$o zUt`e-_t_6S8wn=?c&?y}tUI~6T~X+s*3JH0Tc~WVJK=HQrkby%z0w8ipJ;2;)~|Xv zXUyR%wCkB<#Zr1Hq_weZ=PVl>F>lnfoMXp#Dwr~d)*m$ZQXd(-VHW?5$|~;%M6Tk$ zaf6x*=X{@B+CC^t{`x;d^Zyc#MPLfPr(a`#ul2UNFGJz$-~TSI5n?8AY@dFQ4^VpA z@cV~biss|hAzA+s6EuMD+hCvDHw3BYLtkKxCnDn`@~~0@FD#|!A4Q8`qg?`}Q1o~xRDbGs?!qu6=&ELonE@X^29Opjkf55$t$gD~ z=(Fadbx4g||2Fxe8dhez{{9OlNsZAKENLJHWv)k=zQaD_x;cigdFn%=x4l}%s-sf? 
zZj}x}6%4~>d)tCY*y2o>|FA;C+qFSFQZHv#O|w8+q;#D!I;pJ578!;?}9o?hgx#O-J`Kx`*M)qQk~o-FU~sMRp1 ziKdE>v%kXNLST36x@9QrA7IrYa#GO`Z$&a&L&!w4FXR|tE8YW&fvcV~EU=pnmG|_k zF<(~S94r>cV*K{f8S;)M$9PvmB;{-TW!t@~!rJ%QyCLH2MhhjV_wyoPy&#h&?x~hV z)!@t;=+X#j9%7B!R@XqQqVeL#-W})fiqMq=7lQ_5BF*0_GzfPBJk-oV5y8{V^mG!*}%?;tNc7ybONm3pPIi1dz0gUlIL&T*3Z}uR=U( zMU1p;A1Wmov;DC(H$gi##+azZf=q-@e8Z?drt8gnQqfFnVc{`?5UpIupC-q|tlzZ*IEf|33h3fw1KXNnE~S363RuswZrc3J?w3 zqYC@^l6ED9tM4D>8Tt!}ajJI{1e*rcbA|FiXL_(}=~aabQCN26fqrv~Od9qFFp+aG z%CZjC&|m)J#|_-q^O>$~Zs-DgvMOSBTV{a) zp?`8g(eI>zFL&RU%gCxIl30Co*XI@<9^reoX0TXic#>Co8#w=MLF#i?LoY|+&k}0> zuY$ssf}|Tdj+*()nQ{d}p_JaraH3%Or=*(2*|wnAt%l=x_yKcdWdJv4pqWLJ0@F z8s6_d)eug(zt{cZ_i7Nmk=!z!tM`?$TG1j`p{O3xV4~9m8nWj=IOBx&^-H_i#88~T z?o2rd`!?)i^mz3kZ{Y}fvzdF+w{z_$E7Lw!g)GJ$#d?McpOtP5a{A@66M7S6A(aS2w)R;Vqf&5M@z2PX z6xiAg6kA|=^dT;z-IS-UUompk3ZFGS_~((4Xfpq5*3Xf;{on})6FY`P8lkHh~F`+qz9>s;XB-JHiGB8q}MMF?*u0H;IW-gH}fV>8d= zA43ssPr)X60p~cFpD@n3kp0U^bR`A1(>vo}Iyx-R$UKd32uUfKR~nva9?=jGklMTz znvRA#JaHh>e0arlbjG~qrp5bXaQ%rsI~itd|oyG-u$McPg`LA3^kazK!@t56?G*tCFd*F zteHh0dHZq*-kShoTZVhaH1@(tN% zGzQ{rJJ~nsH?Fn|ZjY--z`WPNA(EZxP(l3Iyp*Uj5r>5*HCn61sHbCtCf zEi|Hk9#?=g1Wj1{tT!FyEfI01@H{D!YTMxhDs=ff7e%J^u>M7Q`KHdBzP-AeABCdj z1{dm|!e_6Kd^YZ*7`WiKG~OTvnX^5gwYPbJlrk5$J{xtdQ1raT97rD@%sIa4BV2kl zOd1{_c)hTp1+xV=*#(icAz6|=mX^k2-b=ZY_pHbZvXvDE*muhjWAv;e#;@IQ3{&_S zat$pmi+#Synt3cPy{o^!s~ha#K8;hk!ES5f5_6CCzVN9#mk8s#r+oKt+-FM<0tGn1|An$(fwY9n|qbk#+z^&Gz%KE%)CSxQFGaGeTn7dLhV)Jn9tUYdy!Mb)WuzGae<2G{RFs+_$%r zG9dnxo=%YkhdR1IFezFn3Vf;l`l5V~FszFQ`rEfrcBXs0Hj1|aZnI+(Yepn*WLgRV zwv(>9-5Hn%hzm1be!R^V%hU>I^57up%^Q}FZyEnc*cBvuD?@nOqwTiZ zTUtFJ5Mh=RkoZuVasj%vo$v0Rm02?#*tbrv5bzf#OOpo^hWNf98tnz8JiK*5-hD-U z2e!JBhhh^K!}%Nd2Bxl7K@8?;% z4&f(C(C{K=6cbEzMDp}-soxl>6QtF;-33G z|7R9;YBYY(z_P~T$=x_1HG;KU5pISGbu>ptB^)DteY$|3N56iF|5ff0NZ*1bf*0l@ zd0zi3=c_pnWsg4RSEQloog+xmA7(oW(yiA`eh#*5j??wjYTWtx^2p_Y2v69*o{-Lwlw5)Rgq`v%+& zN|x`+4l+W{L-JVh&IIzaG|8bhT{-XH)GiB(VJx2Ss$)#rbT4sTJjhLJ 
zT_-@TR-=FBVKKX4*yrd(Tq_^&6K_YGVfq$5>kX?9mVNTQW7rZ4K2OTC^MT>1j;SCX z4KI;|eA^%Lw-ETUD1TlWGW`30Hj}a~Gd^i%XbreYx;c@)?h%cbL0J`B*P#WiBlZ7z zh#1qAGs0W#1Efs|j$*fn6MgNo>8(6wb_j*0jY;}&>)#qq*oJrW%SMaW6_;CH%?okC z+e@D$46dpcV?iF@nZAy&>7)pLx@bmYV=Kix-zCApx01}Er`T>q^T0et%DAK|6^*M- zv$Ar!|J-1x7&!~kXz34;5A2NCdg`$vzc$L<77@EpD;;4Adr%Za8ubu+o_Vub0bu8xIetc za(HpqMlxsW`E$0y8*;f}eZphFMGiUK0+WjQbQ6ePGJ&I=YLL?A5TL`+G<>IQOj9oy zM>m*gT&S<;6N^PCB**-ja4-sq5j|cXzkozsb2DZ)-%+$ob<= z+7netAparn%fy?rzfs89vio)hhrym-(>_%mZO`Q}hK+5h9U>_`Ggpp{Mp0nEj9zTF zd5!q@ka^k$^`*l~i1uqSY7y0sL~V4) zk_Ried|@lkLEG|^6~>4k`qoFakL2s$)8!09tAPw?4^wV_Bir^^@-m_IgZTDH(h3dX zXX5LqAJOR*-Y=F^!bz0EEdDU-{^Z&YRI-zbKea#ko1$(7{$^HT zx%N1~=*9)&0sE3(9TKhWpHu`!ub>9~0^9;e!;BBz$0g$-Kgg z0+1Yu;}pc$wu=f`jN@@$RgdI19?75LDiGCEY%*0Y{ZKMz%~Q`s#kr-U*_ZG&%Dx@- z7DAaD{00F;4ELEujRmYX+$@zoq378K-!;IFrob%i6s^J6Ua*Zia33(wAN`u`q;8Ss ztY>*81aS1E67aPI#Mqp}`sjA~y|BP?);A}z{Uv~c({Y}vrVnBK=z!?kj z(4!^rz2mtwL!8+9Gx5f$CpT)njOVz;QUbR^Tn)@dM-n9^eXk@Il%Vfa1n7(Jr#a1Z z+?3EI*P5P!524t;`ZEjH2*e5;BJv~c+>~IQZbthMz0$w+Ufb+i3$Iw9V#9JD$w@+kgpfM+D*P3Glgm z!=DM~_wxnX!GGQ^%J#LF9(QFJqpEp02D+w*X<7_EN0vE~TJzDj!00Lr!=T@TCa4zn~0!!kF zwcY2f(>lAm$?CR;d#VW$ZXKdJ!sPTyd?}xrBOp#IH!`wn^OViS{ zja+~DxhILBAHekSL)-;KS;uEpW^FqwG@G_>a5eS7f1Pl@rS3qaoqewDIKN7=)#bw2 z<7nsK!@{SAKXA2V4&A~h_{$YF<*%fy`?5N!s@TliJbw}5k1`ptucz!Ee^(xAz`ccwK z5~9f!Ibp7(tV~XwwJ@>F@}Rl+A?Lv4c%Kx-RZ$WB@EuLmPYin;NQ+TOixHs<&fRz| z*(E6oGqFa;%tuDMiofF-vfAWOb#idOb6vWwt3W|Skb+B1N0$*=6I)ozVa-=*HV(~> zG(!^7C2O!2FLwVSZ1?@{O@tV>IQW!&4PZdPJrCk$v>oGYm`B^)s!q#4YMz16UXreP z>7Pkx%|Je|p>-ei3p6pk)i!`mk4^N=>sfHeNe|6GCzpiDjf(i&}T#@^~MfX+M(1rP;6Tbgmh`%q#l50GE3^S(Zcf;#z! 
z^~ z#j1V1%j&JsOo31OX$8=7eCL!cSi^=&P>%m?q@~e1|8_x~w-`1gEXv~UxvKKXv~Ifi zi0;G?3oQoBzu+9CHd>?BrZYG?+G~Iwp*|Te`}CV(7q>c%jeUre?c+<700}aWulQ^t z^qVJ-F@>($Md7i?&K-E^1W8U!xd6Cj&$MFr7#kbirion^N9z@Kp=-#BW1qI%eG?7( zIL8{et#1L|vivmFa`gbC589M}ZI!`KJ^xI$;!ZdAh#ycc11X@b6CG z(R7p>qFZ}y{4YbkH5^Kth1mlV(WkI3s_lqgSa;IjG}g5e&S&gHtI)zu`tZe?-x%ix zx8ye#i}4r}06duD4qG%E&~P*4mNV`A5H^2b^s&S`HV$egaV@hqfA2-Olp=Q`p{o^Slj;|%DZiNbsyh+{6{D$@}) zr$nb4h#E796()9O>6_g3`7G~NI;;k?`Ap4EhP%41X|+3%I54}}z|2oyI;zMnbEVwF z02MPv(=(#yQl{lA&MrsmmBf8=Gp&ISrHhfbk<}lZ#~rV1|MH>Iv3GdMnHJ1kcqLaZ z68V8DHr&z~c{<;f!&0*K*M^#cw(pCAtc$leI60M@C%)`Ipd~*~>(ESTwZYTJC0BR$ zo=zX3#U;}uE%(7WcZjgGtn4DWmm3=>J`6Mmt7bs!rIlr#O%P9W-CF5Br(_bGQyk>> z%L2Ut|G_*TEK8jG-p$=%T)tWSH8VmcSro9)17 zcawj#;`*`BIuC`PM?%N9W;;Gkzj8DeDHGo;9yWribRx+du|~b48o0uO4H&)C`@e!D znO=y3!=B55bvQXY1PoDlP|Q>f!04CMIV6O9)rMbO#f)c=f3o=6jJs#Q;|x>0)0F20 zu31XV4qW6pZZj0a@4l=hvAMJ=d#B`Dz=!OzlJ5k;A|fF0OzrJwODt5=?Y*Q7@lCq8 z9|K)?o18@nQeH1@v^y?amu^9dMPW?x}bY>^&)`o`c=j~(+EGUVVR@}hs=%xF(Mnc-hcQ@}#UR6zPJeJ1i zqI(BBN4wqJ_S@&?X_+bMwz_pH1UZcz`e&P!agQZX2MG2UOy!B3N5InJEc&}_qk>&#YfFzMA>#p@k=B|P5n z?sM(}=_dn<{G#$_bTh>db7@>H#rwxqk(uToVc zx@l6R92ostj}2gbrY8)De@D#a-hk!ZW(7q7;p}?U!Ig?fpAR#<9a<5!sZ52MweS#n z-lGDHkFlY{$Zu>Qy@G~Z;M}q4gUJc<2GB=-JvY%q)xJ|CJLC?0-Q33hUx?@Ladhxs zFvvO=^zR|de@aiu^x0R$`WI4G_37Yzfb8S5uitDKKdGZt(Tk&XhkyRqQC8tTlMs3X zMmzzY819i_$;pO4Ng>4psl#k!wVocJ%dmG$q1onq{%^iG5wGzz)W>z4d?jPQ@vu;P z(c*VEK0W!BFRXsy@Oub{;9`ziTLhj^)WG7#|F~43tn%3kG zi+f_W*F^)<7_TdOXyCy^@;F=m4)62U3qQ&@23yE8VQcHFy&t51D_k)1QJGJ&uQrkN zlMzpu`Hm)dYl$+HgU}CjRTaiSBkO(bhx=1!LSu~NwD>i)?I?gQzQ;=0-klW}mn$L3 zEOsf(q0uC=>p?I1d2(T48Gb|YmK_JLyfKJ#p#tU^>D4cQn*<)9bxX6oMk1xnt%Mw) z0VCn}$Wi|foE(mmq;okCABXcQjM!n26p68aO*;xCv?-c?>#@urIM@FwW)uH%_ zAF`%zg+Oja!mEd8b(K9tJT27-7r1dJOiiO#jKUx@wC8#njn$5@TUDp7KLzQpV#~dw zE(T3eZ{jvnAXBBdL$sHADgc8^(nBx(>$X-z*VaX1cAsXOjsO+@@AL8s@$CBtB*NW2 z4_$c+ameeHT_*Dt27m*K{llN%&jv3L*_uJ`jX!y}Y@g=-{mvsaYlp8(E)xj47_v}9 zh_r9@i3D)PaNh~27IY-CYTVR7ZnFR;y1-S;tov7MiQx;U3pd?VH^y 
z0M|s@su)tFDdZfae~RTgpZ_go?bNwe(0%LP$Nuso+}UR%9Nmmju{)g5iGO zO7gn;yhhb#~#CGuU;?-SAG6JI76ubcjk9t}5h4^OJn1#+WV$M)1P1*=-SJ+}SG?bI|cpGum$8(~CwxvpJ!kHSLn#=CdfssN*y$$N<+{ zTG9f54ee_9`Id~f1mMO|l$OKL+T=yCb8!>j=rJpzpC1})dPU?B#KRzajXqj0vZkpj zI9e~fuKAXHbUJNA)0DL(W8@mQfA`x9?9F2L+goiL6ZaXEpruv1$2dBc*13r@2kXP9 zmr?Ry^LOiMC))5l&uvZ-zF`Gc|C;5HeG@X|*JylBFG_Iu$RN`0W4t5m3TSQ#*Qm%0 zU#?3q>|{49asV>Pra{^LP)|MT`V=&~Km4w6udkYQ8_(JcfrQ0k)&5wez05`T=cHCV zopm<81<78+EaSK}r;AIzx#u-cZ{6ko4sSU=Y#BKjXaGx=9gJ2jKBSu5H~;4_*59k6 zEqF`(5ku8-qYYi`$45kz`z1u^Wf<_EqH39DeF7`~bF&F6+toWnUC=ruD(rmsv*um6*%X4zIZWb%6I~~4Dvk?SzFmBUwkEw7 z(vA$m})A`U&5yjvSF1Dstr8g z^pv;PmDQTt<6o|rI5geKFuvH=Hk396PY|i~s}kj!KaxLqIlYrq_qfp2*N^DxVopv@ z&gYQC7ItbbX~Dw8#4IZ>U;hAo!)R?Yq(8$g2V`PN2WE|SuBDg7UTe)jKhZ*J)9J`E znCaC$dF;5@`?KQj5>2LGT{QC3G=N0vpKldZ)IOIMCf{XUGk*KItp!&oIa$&1=T1mk ztxOI%g!bG>GU2DId}Cj%lGPj*3gY@N)xaH{HJw-rRNiW6o&7BK?a(2qS&^!C=-4ws< zJK*O$qMU8Bak0mZ0{n;58T;e6t3%E9w1`hPKls~Vu`~3wY6)5gmepWSr1!W0RVG&w15oGQpUD~cR&{P**edi{4^+$kxeZ6w!|=lBojaoJnV%E#QxZQ?co2b>dV0_JwA%ugvu5^Bo;uKcgc}`T$zUHIzr+<=q6|^ik5aV%j(5D_gS5 zY;FfA(0a{~Ir|JvoJfTcxIBOQtggo5gv_m#>#yQLse&%U5ETr)pQN)m9s?K=&xpk< zf^n)n-v2Sm{iczWOY&!nyaS6?+^w7;LF}>rPWz_q86gRkNOfBe1r09GyXQzVqH((f z$LCz1fBL1d`3W3E;LOKdzAxPYtCrxKEnw3LLIIeUakc8Es&Yj9#4F1%e#eDp!-w%s zR7^@DFI!Mn2GG`uae;(>^BbE3^sq+jW=nljG4c4UfC435o;1@S%qY**fD2xAC-r0+ z?IP&=Sf?Vaj1M0mnU86C+UtM45Pr-J!An;JG^=_p$gfE6=ZsiM0fazQEEBcrXw4h5 zl5wgZUukcZzBtiNNQbxW7Ch-UYj1q&jj_Vs4gBuhSEXI-mO~05U#ITJ@T8mD1Dk`5 z)QHnf^A7!r^X44K6i~6x}1~HG(=4DYh3ibK9d&y5MTcZQe zm8&99WFOCSLPxi|ubiK79yYw!iM?8W|MxatHmI+cFjiQ+-sy0!tZ+_u#!-ml^iRpy z@Ak({jAVyF2E#&$vFPhbsZWMA_u-bzVh;8nf6taP3ngzCNsSRE=@J%wjRn{w6$w+p zZYMVFG0cBjZ)da+!Zc=DtkCM(KSQhXn;6ci>tOBNq{nsnaHk*PDE?z%D+F^+Yden* z7h{rD{JLqGe7%9&dTIaXf#mTPefD+A(Ai3kC*7ioGQGzn|53af)S zq&3i<=9UaDCDAU~*km7LHy`s}dNw#(13FiJ{o+AI5%1s&)kTvX-8V^5?+Mjzl%@eS z-}Vm;sJ`(g{AI&kw(zbjpOxP(jQMTcJtcz}_8+mwo<-Tyi^4fpeyz1kU=o*$xQRFF zK)t59nhMGSYT+3z+Ad5EIMkBUv>aKB_Cx@zjE~nL(|V~nF7<~$!;A(OyCqYdq?F|<+62F 
zU!}@FuU;6J6htZl{e~+*KMNCh$uuLgS?7{ zYEQ{->N?U;7MgD%qg!=P?EMGT+`GaaLlzz*2am#s!V-nFnzoj-9{sd84&VE-5!^82 zbZ7H>{d}p#h}7DEPw&zSp^#_2!%2_9>8x+4M(eye{YB?>i-$U0)?aJd=6-)|p2ih` zbM9AdO+*M9IDtIQmA(D}2;gfy^dQ`6lk`Xq7QwvZ`IPElVRP~iNYO&iPMMl4M`!Y& zUNP~2JLZVCvI2m;oztoN1_~rO#q`66$k|zuPhMUH6?J$y!*q0XWtBgbnuIf%k6PIrfy37 z6M=lHUv;q`-BT7l{qL(-Ael|z;@oD`z}N=6N}j0R(cVI6lHvKFor1Ez=!!JQLDWt% zrjynEjCh;XMBUjek)Np(e>GgwH6(F4Yw*aXK4lVs5CDRBtceGikh$%gQ9u2jNjTB2sLUW`abbpX1~-Zd+>BoJYhHrSLAIBCh(^# ze_1s+qEfsZ^Dzn4JCYXDh+yB-$&PnWQQJ$r#eplKLO*{gU)fQfP(lMaBPMTyY7z7U z?AFfkUbLh>m0_W&3Vo9WuZ$dT>k?x4h+mHEeU@q`ibm@^4+8=Rke8)y;5DQ$9~-NV zfTD=nCLC7_wZ0Q?e#0G^I_#;>rW`FX&Ea zJb4*_1?!LoB3MvDm?O!s1=s*P+IcNs^!`_@Q6r!MO}P18eJaITQWNl`efDjlRD!?= zx@SpTmy=ETVU9AR=tNTZ%BGqVDA87Sjob(@Y&FJhMJbX8|G2l47u<_5@yiTknG{U+xtdBZ7lR;cdk%35Ode!CsXC< z;Gk<}7T<+7c-eSLmEz$IU_3Q8HXb@gC8$P@f=TZ7U&7pxpW)NJ$(j;wv+qu0PF%{5t(}$I$g!Zw%NiLtH`Ey82STW3wV~la5t*NZ zY56>YogYf=1%wIx43L))I}Mb%GM^4p>`m4Vje|RRz$y?0>c_h%dQrPIbPHk-bL()A zeDvCcoFo9O|Jl1K3|>i#Gaz!8oG(-#j6LQM@i1smg+e2dRv*0Un`=8A_RxG?V6j-+ z91d`-VQxE|W6PuguNd1E$@1N|I(*wuy+Z?YEP2`&FWvcga;ArR-4bRadMvvVe2|`< z?}L_@^s{&;yyndOmGt~-9?ySfmrsIUef0hTMF}FW`X%I!aQ zK>tp>HIJ;x;kRJiH+V6uhl4w#E@Wdop_>@LlId}&pPK)&G)58lwKzPSrF^hhE%!!AdtO?s(wQRF z(_d_jb?Dr$c?&ZTxycLtGaHK^NP`Vk$OEPi+q~Cuup#GBVI5Y%^gqeC3XAm%YfEMr zl<}-I-%&Cgy*7azyx&epyK;w-pI>(ak$X0;SY;6$Yb# zV#DSwqkZ}9Q~D>$-3}TLZoQ6ah(nvza$hrPTjuUZiocb9Wn0Uhn1QoBs{XQUNNOIh zjy=AYXInfeke_{GCv4{e$PHU^~2uhA3SWHz5NY%H0vHS zyC0~`J9Ae~qMr2Ct9AdLm(^F{TTx!FtNX6Aik)}Cc=0B^$bEik`cE1j9-cUoP0I5S zWFu_MFXH_R#m_95;6tJ&>h+*tLB1VUu}%-S{-xg5Kl>s~h^=wy{BPEA?ovSYkB1=+ z!PYsUWQz3-_FxbHH*Z!&Pf+&pzTHqtG(_I=XK{&ddZ7VJ%@Di4QN}WGF0U^y-M-}H zrJ=LXpksjSc@p+s^^{Y=Zt@-c30lq4h1Ka%6|0*n#jwepn!gD^X&ikG0P!AnRmzt3 zs0?+qm(5FpJs;`lcH5zTM-{@qITgk?L8jT)z zlkYcD)vSJa9IA*iWvZ()qa55q?1C4(=ug_cG#~TRt}Yg09%tDK@)QH0-Tz;dOwa`@ zo{I$!XRO6HV+4?){oWCXx+UB~=$b+U$c==Il5MRzy!W5ivui8x4!EI+bH==JEan&g zWuSh??K?0!XzC*aI*%yI6b;Wp;g^3a>_1YN=<)n{ngV$0B3kxaPjqUk$mR~^rQ!Xg 
zXJzFPT5<|rO8eCn0S717-?iCa1Kn_JtDC=MtU(W&k=lqeGD3(OJ~mM5b9@GoR4_h+ zG$K$IQv8^Hsq#M7#wx3_IxBs?R;^+OB{MFAQ4|{*S8j4QFOF);4l4;l`$gdqg%dRK zbUQZrL4#~SH$pL2J<=uztH+4oeA8o7P;_$vl)`CnvtUI!4IAF&;8n6!#)%$gvpP8B zu<4shcT`r@xNQS(|pHcjhZrX2aHO)fzfJF3kSfyYbH(vlDltK*N%a zzdT~f5pb{F@wvx*I5M+m-7-P$*URv>Q!(R_V_G7i-W-GbDmW&x6O6dpF?+C5V@%^T zZ&;WYwvJe~QT<>q9@T!^eCwo1c~PTe)~16Ah_|!YNGT7x@bH+wvfDi9Q&PRK^gpO< zO33Jmi!uioTn!3}?@H6Gu3j7ah{PULzbmPC%ALJgiOX+c((jAq3g$yrTGZ>GZ|9dbhG`Fam$cyqHf%v! znnITW7>pV^hJ@j|IAk=dHX@?~Y$Cl^PEH4~gIxmZ^hyfYufr$X9|2b7o9y}AA*Qy< zLK(;(ZRb3&?mk2?ehxZXifkwT&sv>dc>lo*BF$3ZxMvpBw(W8d}^Pio46?dCocSe3~`0<{xAwx%YkT z%l5&6rjgc229~G<$j?s%vG}0#}lT9}Bnfl>&)?D9N-DAVdYK zTt06-IFwlW+Ws+j`w)%s527Yw!M3}80Ji=Jlm(9dVU}8gDzYyVlcx)lwD{)MpUn-LCDhJ zMHnfML^&P~s(kyU?lECn=XMQ#Ae@G7*9nW0(}8`EAh}a(KQn91wrS-Kaa!HrJ-3lZ zVasl}ojP_P&+!{P!h$DzU9%7?i&CRJ9Kadv$e(p-?7q59}6Ft+@Hw7KG-@gcf`+ukmrTLljE@suh7*-H>_Is6@`Bkiucd9c&Fd~d2@PE zEqeX&R*gQB_20I87k@smO+syBsv3Xs_ciH&31u~kn*~?b8t+?*TeR)(VYAs$bGP}* z3=vgAh#|!R&Jk_&R=m~c9;&jxKzPs&5Sd`{hm({8^A7)cc5;dehxJajstq;}Z47UW zkWnSYttVQT#|Fq|cFX)o0bnaF*3KgnKw(g=*;` zd$?e1H+z#{F)%mn(2Id_Nq^ZWjU6YSvjgA`v<~APX$oP2AUYrxxR0ndqVRcMU{hbhb~x#cEgK*w9_^Bj=0hv?ZJJWP1+ROzv6dBFc{(C;xNio&b=G^ zrytQ7dNND|Rz}$;gmsl2BSQ9UuJe%~t%42RaDHe<*~ycua=aXS^*3o5ru&_e(*`b=H7ape=c{@(x46k?bHc# zPC0o=z$UFXtxx&Vw{HaVk8Yf({z$K5F& zXNE4Cr;wTn*F@eBBN^Mw<^PWNPoR|lMUk3wIq$gtnN=GhjECB=Qr3!iTE@bDgRleK zeoia3%LZ=8J9e(Ivn!Xg*MvJ5Q<$FBD=w$mqkvYK-$t1x=Js^A1g=}EDnj*3LO#@N zq_s=Tl;CDQ(nO%JZUdS6XSc_?u+==U_7~832AuYJw|^ z&16>ZR)~};(u(96!QZDEiV+oGE^KFe>0-Ks05c-l5C4!fqvTO z{cDhpwIS0aT)V7GC525G1yNLu0+N#g>5+m6Uw2+3e(m!jV&nv;wjZ9tuss2bdM}Lx zQote{ReXNm_9bJe5 zeiTG!VQF@8z?@4$eF^V<@!V%wV_rB0KtSdPDaDK@{B5r$jUx6;ee}yYA~OLg1?-X? 
z3f4a$*iFrw2WLSHkf#Gd!|^nedNIRX;_fE=O}>(uUY4e{U}0N3s@lEC(9FI@xlmDQ zS7QDM-IZ?I**tPYQ-UI=6&Wg-mu&jdpSRw_h%len4ldS;e~*BqCiyk}&m6Wl=8GRK zPQ&Rs$pzbFf+2z^`w}QOtgS#Vupu1A0$PAc2u#O6`sEhVv5Z~F)&;0k@-$j_3ydhJ z#Glr>4g@mRHjHzM!xqt#5DHoSl!JefjkK4{F=V;g?8VnP@r*!U zXZKx-X!1(#;Y#|O+HG%-{Yz*WGL4r}VXw@x{c^}rt2A~9Ij5E+*dzBVS1Q2oSO9ys z_UEQDwpRozpeS1+EWw1bKqG0rBRgUr#L>Q#D6vqX!QiHV$5 zP+>ogn3NoYEFt?esyPLk!BRawf_L&(ApzcfHY$jvH~;ebdTa3ZS(oR zRftgRKS2gg&G~hbzzg|#N%^(*|0q|vZ>yJv%O4w?LJ0bST(*DqpmtOxby*rma zj|+E2L|NhyU6gv)%M#($zFB_C!8~!rIfjxJFvZ25H;fTt6ms`>3#6`WOj){Kr_C0c9Pq{h5eUMfL zyGeId^Bwh>PEr30_nvM$G7`H&w_pmq_pVb^ntRd3jJ<6HwG7$b-4WvbG6Qy|+aijY-3E~HdAgva`(rn*2z85&(_r!L2WJ1b`X^|Hy`-MvVM z_5LAV8=m_++M=gXUP&l7J|Us*P_wOASG{a7m=%lJBg2dxENvA46sY|)k@PtC`PMXR zBB!vPk1w;@>@iu4)!$ws#4I*}*JF# zdyiz;tCm-tVTgSBEr^%_ZB)p#bCIW#vpsiEy+Z?Z-cKx&NzT1~(72TYQ|ZW)5PgA7 zQn0JCha$_RG3X0T<3(J22|?Lo_2PUZUL3 z2k~>P${d|i)P-E9QC!{VW)zqGLKH#5O0MVO;%TBCokx#p#nNhM%3DzkN23IG2(8c` zT_NEF$V?+xy9hb8I%^m@7R@&JF+O5?8jYL~cJ$Hy9EIcj2fJ-i_#Os#=p3uep^5w> zBi+&~CJNWq!9MrE9pUv}T4VOyUzMuL+uP!|#r1BTU#{JUnnrHtPLdq#LcLyXP`-IJ zA&|VzN`7KeJ}l4}Z%QKm{!V@4@dV}1>nu-9q5i+F?$kc9ihmKVOu$_?%Gos;a_1C1 z4mU^`-rwL*0zRaJ-tFG{wf8mU;jH&9Mo{KqX}f4j7bTo{C{PauRmYXBa$pMxv&=d6 z$F4)fd_>jZ=g%4WWqX?nRt=M4gu7M6%+T=-S6LhWO9>x`W>Skj(nqA8^7x9Cw->%s%! 
zIVoQ4l@=`FJFNCvpY#8I%A3Nvdg$S)vn;Iyz>lKe1WrwMFSyHiV;gyi>R4M#_usf$ z4jO!)D{P`cah7jo{#{vUyQgadEzXlTVP})A=O}=zd2g3vl~2}}v=t-9!kUQjpgS|Z zH!fBr*$)9D5)bsri1v?SReq1=U_8Ajj?PdB zBh%3iqjYJo=IIX%sFy7SU1!{JGg<%dnIY8Cd!F#=q7uN@4OlV6KWZZVlHN(jgVXX9p^vJzY)-XUC!Y1?sX6w)b%4bX91U}{Yhmw&0skO_Dokqo#c zJZkkq*)1Cj9c1=i`}O1=_$k9Q5Xn{Qej8s=o=f)?{Tz#cMMnCzn&If>OFxrQmBm)0 zN||E8aJ=Mf{uDXXlDnr^uOzj#>-7FYs?G}oNbSLI}XUse;yH=QoJe199T7YyKiMh{u!vz@5x za87gpY{cQivN(ZH7r19JI9a-^3~fo38ZK71NL5HCgfKK)wT_ETumaWBjyxIB8p6`0 zII|%$ieL@;B?OfS4Wu-)IjlC}EX)Xmlg5`oPZ6(lqJ-A@C5!-u+5Ehma>?w(#c24S z7sJ^5T7;#!_~gY<&#@CnOk>|zd{oNpn|$7%DoJEJ0++<`1j;`HcZ|kW7@>ZDb*^8k zhR1d{C!S5$lSd3`4~vP!5>Gqvho1vV>fP4Lgk5~FKH7yo5AHowxmGNCugQ2Pd7K;- z+-yW_i`~9DQ0t)P#_Um~bv$LRrj!_PTD>X27dUSl{GWABFGJCsty z&*h<$XkNne=0s+dZ(PuxG)&kTvkn*A+1Ga&?ng*w+^Ml2y@p@L(`@}7eiaY|n8qBl>_vu|W&9pLE%X%VED?yVyX#N_Vz$k4< zkXiT+7Owe${~irkTvlYY1no1kfpD{sSV!lZ?SVL#O9wx;)k<|w#?XNIj6%)0n#dxQ zgCRP)>}hT}o*TWC7 z60bXv+igPIowD@*F*^<`yM)RpAT(hg>cU?gx8w{`d`8>l9Qawa6wuKva^G+{B9soJ zW=>O9c}nuIJO@fpg3T3CS~0j9-u4QF9T1o1Kr(M2FicRcQw253Ob~DUvV!5sZC9?9 zkyRt~cfLs>?S4UT`ZqD!(P~;s4oYa-$F8N{zeD{ci?Y!4swj7?)7r_vjaB5gFV8%I zWAqB~o^TKe3Yf3W+1}n}>m$9+=nNq)V{gH=8O>-~z_V9}fv^?8ZVleZHiL8L2k<;59mrt_DGpuekKMjL>s7!m18*`r1rscm!LLdFEE*mLNT`;o$}v0r>9%#>`p5QK z{|_vfQT&n%Eb?t@L8BqG4NJyvL`yiZ-~rHe^x$?tZ>;D;S9JUsm8owU7B zlh9wiia^V!@DKC9yvVUBMYqdykNcV5zFmb`EjxsXuXt5T@#Y)em8UCBRcI8c)Jo?} z#pZ9iU(HMx35gM#F!CwuI_z7c@6WsGHb|N%6IK#95R7ggJ*gJ9yN=v>f5cH-SR-pf zCTcdpT2c&frX(XL(=_@N+MX+(xgZ%qLR?DUlGEhgoS@aaDo5#ky4srrYdc8^Tj`L5 zZ-+lC9kjMCt*g6DS!dZANw#up)qJ=j=fEf&@{ z%S`b3KNgO;hOIhPGslZyZT&i^NN`c|oG#$rENhNMT_cW8!0{LZ0SoJ1}Xdw>w;L7fQFk&k-`TG=Y@ zNft~iCS&%P+o;&qzy@lUg`00oe{Poi8nzOF=xh7la%0S38MGyksOjal4Wc4eek*y$ zjp9+FnB=WvJrZ%`657rf>w#PRiMixx_|&sZPvTED6f&6fQx{#3i3 zwKy{)kQlqfxBvG<#!-ch?0^gklh4wZper*8Ff|~Rfau%_VE$Dxn_UYj&j_Qj8!=^d zDF4srA(cZ+BLpcQQ5z|#AZ%}PGBRIzE;v&VB;96Yzf>48v|R_n);&i}ahT*fB+g-e zTE5AJ9qNA2$5~JunZWk$OYt>TX9?puY^28Dvv4d+D94Ifb8ulZ*o}q^I(LnS!TQ(Q zP8eb?*+bfcwS*5A!C`(Sa{QcI(oZ6utttGYH_# 
z<3gDqs>jlpbU5PvIFU&dp_=(}>jEHDfB#528Z+j{UVrAE-bf48zR!J$q_IC?NqbHZ z318Egr2_X`xlhxmeG2i?o_C+)(a0nBpQ?#O0~y!n5qr$=A{kpjFA9y*rV&kO_~+NVP2B!hc&dt0eP5R5 zHXBzC%^prnlvV>ts8<3g^Ou?_l1x(~eO#7;|8PmX*%^H+_q>P?g`D#*omlTnf#4$F z7FZu^4}rXiv0noCZk&xnTfhZL@wPsjcYlO74pCa@E{_%64iciyZX}HB83Je(xSrrk zes}cl>oRRXF8JHi3qlni(0tiNL%zg)w0QqukEtRVUp3efX5NTxKD}A6-n#it0W3b8 z9@GZ~M)3&Wvtj0)3q(#lAk+U&?Di1x3v^~|2M_n`tKs=N4~0)Q;l%?AV)j;rW0?RC zRQO<#_%BXURBQ{ERSO=Dr6iC1 zdQ<(Cs`Gfnr8+W@(6jD|YgY)>tm50~Hc}VOZ*sqL((3krz;_sX`JS(4oSm$|imi4Fq1=|TM=Otf+1Nreb+qrpW@$u)q_-Gd~wk#Fu4+k*Ncvh97DHA@_uZ~G7Sshzl z7&JRC+o@XTQ|%?Bet406IkBP*6Y_FEMQOjR_?jWNiaJ)n*tIg9Rj;I$Xgx?0HJyrf zxBlEhQF5@kZ}FrFghdeDxrs?gP_u*M8ZtN(7@QbO5iTl|N9j%82?LeOmv9CTE60zN}`o!r_?Hb?x8ej!(MDE>kr2!@v;`R z8u@0~h|nilPft&D-R3b!fUR_E8zeC^f)7@ca_Deq=B9J1wZp0Vj5jYpKE${$z%P>X z##f+PcO+GVZDzg!h#|FGxNg?mNu{H!%S~K&S!)hnN?goFha%b_0$EP=s?ygpN|+vG zPGQ}roArXSf+GEUsQ8+{@2j?X4VuUZaxm@KxD;Gu=#zj&%*3%yELDG{1LVX;R4Spl zDF}HrSXo4h6qs=4v-&pn^51KzvL29?P%n&@ifq!)l`9MU9F;TEzX*xXVu|Lyx$HJ@9VnJ$~|K zS2#~K`C(X}u&F93ad3s(i;~4L|KLSx{YxeVarCfd7*7mvdT8V4<^|l- z-Vy=|wycvhPT+Dg6_%@b#IEa_{Bif(XQw$Eou?}*2X%_BzX5({DO2Zr+Dn)IXjWSH#3TBR!hslzY%VW`7Zf9Df}?T znWVxz*J|!A@(=6Y1!4jME^!i{$|GLWo$ijABVJ)_jbkGZsD@JV@aW9QGGx+A{%kQB zW%7FA%%}hX?0B)>*hG=^6SHGU^rL@+7{lWecSIipu<#0q8Guodh zte3j~_nBXv%Yh?_;44AoxHwvpHK^GYLws(xAz;t!N&xAILS?t}q`>Ha#r;>MY6G&6G?n5y zcY6Z86yl`rkt-#3m4vaP0Rp4bc=yIR<)vjd0|apYpAX`PVh5-SpKC5@Q0d3$gZQ&CL>Wlu8M@nbBEd+$=@ z#5OrlEu3ARZ~4ALZtvd7HrkB$oNe?j{FK1(%A*qY6&_Gm?Q?EfnP_C5)X&SyBN6vS z4N8D@y+rC`YkP*NGDhjJZqfVz(8(2iidlW4`=JG>l|@TX4S|n9KL#N@#4B9YT@yNB zjfv75881QozQ6SjU}04z^A&Ji%i?RlNH3<#Cd<)C*6%9f>^7;~6gW(%%jP(G=Bc}l zf-p6iP!Er}X<1cOJ&)E^ldhdGRD9^2)23-^(SSw=O7?c#-8#s<>gb zXX~f*-+bw>l|VaDeF+X*^|=kesCXBre@-UAYDd7^97Gz`&JR}_2}$h#gkI8$hwg`B zB?7;%+t5tc0-x4;Z#Oh7BjZh#Dj4f*r6j z`Q7{yhDsm?$YB&}ke+k_&(=_5bQXipVrmRF8>|hNsR{nX>=y+0bD#n|8|GG+E{r<-oxW--pk-Ef!)$OKTL8@mN`uEs<>)l~IzI{0HXC&}% z!9^s;d;~8qa<~NIm|t*9xCPx|AjFSOaw05jweLKCtOPNMJR-cFO37@pe-LCw=i!eD 
zXHq$PZDBLYBYRzXmdVOn@$j2CW#EC%t(uy#(VN$issDkNM=)3f2aP8rvrF7+p8J#c zg6_|!ST=5_dDM6`%1941?AbOK`N7GxLX~R?Bi_}hzj-LoF5>eR(@7aYGZnV`&y%Q*X_Hmzc0vP;V%R^6ZSnrm*W*hec3}Gj^(8G@_ z6xN*uP&*)0-9Cc6LbSg668>P0h`HEL@k-+ZpLcOp+&5akqm~E}dHA`Gh0U=dD6ngp z-T9M>XB^hkBq8?X-*DB+mR&-O?%>F`o_fE<56KxeMSLAgSQWD*Z+*Y6><|KAEDg zIa>=q&1URBoG^G&29la6zJxTpn!?1a+WRB>?T$+vUejO1;Nz9hamO-CdC~3_=IW10 z10(WNM{We^WjvEfJq@XL?D_bK@_fB@W(v$i?;@p%+1kzn@OJjprDrub^NQg9FPIPSG+y4|Ni%KBd z57#RdP)7Y|#jo>~fhQ_clL3M7$jA;T=@0*dS75ztFuX#Ek40}i@LTs8 zu4UC1O~`32vJX}k*E+wGVY>R(b66iBfD;JUohcRsM-_0yGw&ognE&e0)ZBc*6@+?4 zCF$OWyObPJ&5q_{qd*jEuv9i%9BByF=&95y(Jb2wZke=_g#g}S7`yTqP5u5o5aQxz z%yeRWh6Y#!hARqcAiK%{B;%{WkYlk$-b1V+vR@A!FgO7ZcGg`yxLNZ0#S zT!UE;ytx%@a<*P*vF>9>0n=Spg&&BrDC;s*M{Sy2wudyImS2UyVN!80nL&cOm1>W= zNv!qIN79?g=XqlAWV?3lem-ycT8z3ar7dvJ(#L)7)c@$?$wOY!%CJf2^b|$M{h6eP z#evoxakIF>Vh;a&E4*^`!xo}PmzXy}RQtIl+7+lZ*hG;Z;dv~7w*Rhf4^ch(#&VKp z5Ir&J#`Z9CF!`BVa)jE=5}xPmK<4LD1KT{$Tj^j^8oc5`OQZMv-ye|7D!@V=OAW{j z)DkG8hdx{TkhzVe#}=BcowlN&NAqn8Uo!auiKt3BaN*6NzQ*kgo87&+RCZ=t>RR!r zFS-e6x7rnYe~Gd${6Hnz8I;sSUA%>|dn(nJ1B;rJR@E8Hx{gIn?$X_@X-st2a)m58@0=|Fy3^U#Tf!tdD&SvQbb3#BSrM1DkUEW74UH3m zO}x(5h8HEgWScmz7xW^h4&%5jS}Ez%7r7ne1;x`14IM-F9LUAz4A$#^V{%p2Ss(8U zc)P5PGob#gKn`gm!v&g>)1BDM-a)QWAyx2})XanJUXEXWxd>AHjO6#3S=Rw zRquUjVo@^A3kRw&nnUZPpXO$G?oQe0&(;u%c|ITUkx1SmMvmX(((h6aMn@Q)eKN-j z<`SbS7>_vn#Ymi=gU;h!MLGxwqIQnTSKQN|ED38pgvRYlKE3s%1d>vY9Y>kUj>3-D z?luN6gHF->&-+}C+Fjt%%lh5-+d>zoh1qkc<4RfT4aCLB?D^>H%@h!cJ|=aAep4FD zAq#2M?&XOJcD3NPQAZ!BzLRzdZPPgyW243Kn14w9Ks#2HnL`o^jFHJat=`Qeiu7y- zU;VYLvjg#aV0y36ZaZ(n!xOp%=A8P@w+1Qw?mJ7;(}@D9>3{lf zx1A<``7*4A-vMhau?|o`2G7ngrw$cS0P?VH;}w4F|EMe1;kLUjudS8W*Y^p!K4F^7 zJxKa+ceO)pNKJ{-BHB!i{khePxo%OTASbHlNft2d$Y-o+Y7mT^ljdp({VQ@*I{X{; zN|{0H1>~q#vwJ#ohla0VY?=@`;E+m0+z`N8G-D-GhD%32^rKqy15L_r5$lUw%cf8| z+|)WUFXr*4!m4J>01hwVKRRJ2zm)gAh?K6Hl1XwXgPRfXs&FQgVS)9ddfqa)-B=sD zSkV1DXPwf++-kM7CHR^t%#v};v;oW*cJi!j2dm#N^EU+N8UUS`j!Ky4gt!D;i)l+X zGNo-CJJRFi{fWSQBOC&MCO}?(VVGdv8(yA}3oCUiyZcYa4gK8KQ_26w!BPM3lzC|i 
zJQVp)h;s0MZEtG?JC}IVkgr*9#?lbNt(o?{6ohd0DMdX|72+x4HS@zZb|eTSJLGUl zxZyX7=y54GVuWMOqH!xh!)R_!z-lJ&ZmD2k^&77W3s^WDXK6j=oM3@_fP4$4?k^=t z*d!8}o!l`~=MRd}ew85JiK^e|h{n2bc6c%MZF`YHvUQ20Q_G6S= z96Cw`QPn7}%IXhf?DOWo&p04iR1XSSlzWx_>zGvZaJ)Bl*KfQpenYzS642))*yWc+ z0r;zRXPOZ;Mee>1sdy72l-e7F()}sc%Iuh92X$UICO9wcY)v;}ed!SAXBC-@UnS0u zHuM)n-vv-;+mC!Y(x-~-+eN&1Ba8mR1&8=gR*_6gEvH^cBuzchxT6W}M!r|qrAGn| zxIfECU)pkTex?BR`QTkGIXs3a$5C4$xZN!R^hf!E?^o8s_Z5wC&&?1lsO{x0Di67R zX0*mUK;v{{CJ-eqV(jd&^9 z(kJ^u)K`7}H&q_6=00;P>Ne^>p&tRf`_s;@u)BV|6Xf(c?uOCxLwrtLsoB}tcf`_& zxk*$Kk$Pg(V*_!#V@L1l& zQw*=I7xy6+#Ti*9hXP-Aj_JvB|Y@Y_!kT?Xs&q46+4)gJzjEKCLLXqE{X z#lK=U$4(#HTZ)K|rT%jMCZ)SF^*gU&}1*SWihw zcn^P6!)58rl0AZ9CUj`_yYX^Qul`nLT8h9T~NU6Td40 zkz1$c#w?=k#g#e7vJ~eIjAyc=tG=6`s>fuf@>%#u>>;NOUrD zb!J}Zp6d$cU-GvloR=z|nSqhlf0)h}@($0}V}ZVIqpVM7W0c$hcl;wAK+0>Wj--Lp z+miFq49(Z;(Q@nB$6Wz_s+FX~l!6lN2Q1NVQ3ifElq08RK|4JA)a^*>V70+7k3Pja zdt6Yr17mUjQH--z#vFYp$TS6E?YRG}vm#vVES=Y;a+gECt7hlS2V~3Or#jO{NCE$N zY8K$~j$4%ndxs>znN~q_#-$Rj_YcCxzhtO@mJD|Vn5 zIZj;;P+!7$t&7bE;g+A)Or5@Et;Xxg4VuS++E#|7`PPT#VZ%EfSck=v)Ix7R&N*h% zoGF5Jna2Z4!8~&Jn{d$ zP-pma%o42@{TGrPQ%Cp)AGU?nqWvA$Xy3`wYM9q6K6AjUTPoz0d1UoPEEez!>!!@7 z>tP#<+5Z?@r{nPr8Mb)HtU7G2Ku7n5Zy0|Cf-v&dAfe6R0>zcVB$?X-h_A(-fI&un z_|7Gh+q!aElQ@%)9)-}(?)M3rb@H(L50>0MMg&oiMwqfQv90k9XFiXtTjuitjC{$Z ziir3?FpAR+VP>KD`-i?xScGiUrr%;ASD3wkk7`t#(TvouhVT-Bd zn>eYX{&=WDiRXVO34>bl>|1ZHHu%60TqV;63hKZ++{tQ4X7Pt5yHJH+EtFz62po2lW>9CWhA3`1uOpJ!k`e*(Oi@PedItRXUThCh#*U>2-OHbql8 z1`y0F{>dnKwvv~BPEc9~ziz>grs*0rceTk#d+5T)stQS)TUlZ`Eu+>UM_A59y3p19D z0KwupxHgbT{YxXXtA=!oFx0E`^mI|l{(p^9|35aEda>tOkN?Ev)-wJZCJ&NJom3(8 z%`p8i)2@$Rm{ExOD3>#K3ACqchLIxH<{ znCD$2p%Be{5}^V7trEyj`z*N?C9Xb&L-@}cN%(qgdPcPqS6*nKN-l=B^oMA( zYt1!$=MOP7*$Zc7cZ>P?1?#BO4b4R@agg4TcPS~15K_hLhb;#LcF(e)kioy(pL#d0I* z4hE1C20Qs7is>X$HEQaSzA3K;P7%iz)~dCXRiBHkG6NtN0AWT)uYSg zVgiJScLb@yg=?AZePztfP2k{*I(3#AGT}hANNfiaZUnh146Teg1stVND*fEm95edy zX1L;#+j4__Oerts?`4_!!7`$1ju_l9i(6(MUR;*PfZ4>6(pVm4Vryd$caS%U&17tN 
zP_k4l#eUdm)PSYk5a`oRM&wyJfq1}*1%=->h}pCQquGaA;RGd#b`PSM{JJWq@oLG` z(dLKL(HTI>OSJ_5Jlgi<^+m2+@=l!WZD)9M3Ap5tFHT2&imM$g!5*9iu&&T@X} z`9;WAZ+=Yy%R37z%=Z5|eeS*wl7mUiIC|A(0!B)CnXS=b@k|1%y<%=5;xq+(cPs_iQEaL?=>RJ{mTyPC%GOu0RY|q9T~0Nbyct#U!cdEJq;1Y_x=z&ioooa>AvIt@qcv2_67a@w?b(RXXg_A^KErS_8jTb z`q*A+>?*Asvc-IKHwlk&`Tq448K&!65$YLv$azCLh$dTDu;iH>?ZJC>a=!}s0tx!TazbIT$3X>d*R;W_XJ zD|U&>E4~rI+B$U(k=f)?jIZ4=yT9k0S90eKO>9N+?Y^I#)VYXg;5`A>T4zG>WPZqV zn4;$Oag|*L#e2V98Be9RC2H67qWnw-YjSq4e^vMcny3$2^1B~uP&S%okxvhqD``;z zm*Q}I9YsQ2O3_z%vF!qyiF$n;(fnI@TsgOa5ZDwrA0C0p9%j=cPVq+Ods%JyRUQiu z@OMsZ5w9FOPHl9)E(BC4C)JT_ScGTo%_Nd@je{-IH2cjU#Ff>qMdZ8FTA8za5Tdqt z?}QPIE{RJP%f|8d;q!q~Cd|LM<9 zh1)y*NO=LPSiQ!#G_PBV8uvTecEVSf*_{E2ZO@Mx(1+_kc6Zx6+Z_Sa9YF_z%fM%4 z_77RXrHupT=eg$;cbxeWA-${9hL-g-B9UPxl_zR>w818^AOmqWB9*=Pe%{JyhC&sy zj7lGSkqB1bLG%9Q7y}V0xGF9DdQmT4J+ACffw=GPLv4{Xr@#Liw1|`5u6mtc0zbeM zn!u5W09Y~R+2y2^hct+#qM_mI;IB^s*V0cHm+C{MAI5Zg-a8@ssoPpfv!>B)O|y`E z<0ZjT;1_J!h^LRsa0hxccogb7p6}#HwIQ|FzMLQa(HJHakCmy>aZ6T7B0LhnWEGfp zGJMnJE_t3U%ebY9p%t^``!Nq$jYPAW#f$4jmM@63{EI7bah6Vz8@Xl>x{wq>7k`C+R!jZ<3bB zW(0GpwGV87<;%}@l7&t&zX6Y0;ky@emF=#LZcd{$#;ayVihP0rvHz5`>kez!&6KpV z6Uz(IH#8~p6Btbyeu>IIkiBxBHxA$Y#8=IT74JlS1;mBbB{=;P`fKJKkTCtn0R(F+ zB*A+v#sA0t^$!y8YyNtx+^kxUrN7B~F@x|~fKOfslkEKz76Ok2Oh+LTz@&na5`P5{ zS6AgWPQ@@obY{r8(P=-fldJNeQ$>49uW;XF&UzG*uZPgQenGt;k1^Y&ky7siCG`XZ z!X>{~C~>~;gh)3ah1&Edr9PK(OT*j$1Fnn#6?N2eo6y!Z zWtE%Xf%uVOZX_4=Pqb$B0y(*Xy?y9wH0K|#eJDl5d6&me$JKX7*&_gZRPsHXLlBU-bgct@2jf=ud6RxDX*2E$ z2t8g1Gj@3*>fj%qM@R5)*{0k7PIsrhbArWiwyKI>M~H5A`#=Y1I&3VB*53!Y3`GrQ zQ^XtPAtdQ~!+abl;6wC{AZ=23md@K*^g9=}fqgZnrDG?`o%_{KAyd^!bKl`54UM}_ z$PkY!W0RSYt+@1 z@aRq-n!5=-#MRlJ67m6OiC~sL1LnWC>y?z^WuCs-y;F^SZ?Q?Dnoki?p-snatt|ZA zBZ?N7{l-uGyQ0PF7&Ch&VT4XLiL=W)qDiW_eh<`d3YH7)^_e8Z!REcS3dwWa)df@$ z0dtexMT6johZR2!|2ugr2vF$6R1D#8;ZVF0$k}oKh-@V}DLLA5BKtQ4MdNY@zx;LA zo}VHRnPUwSJWD@W@)yciE_%l730{tvyFKapo$AY;r3Q1DAFPVV=E^NP(N2W)u4L68 z{T#4ZSavgLHG|jV${iAitJ+oXi*2pW6Qg3=8?@|y`e4Ar72-D&iU{Z)Rnu*cD{})I 
zi&K)PPxM7v^PMjYo{A{U^rzh^zP7TmuxzX+ySF#}Hif3r%Vn8_RadJjHGYHD$G$nN z&@L;~bA-;!M4V93Ru6kvozVr zcrb|T+6jGBlQ#+Pc{qk2Z>xGQy zQ)q=a1$e(ra!cO9-M5}N4QsB;2?KZq@=Y6gFFP=ZVIzqlBVmd%6qqH@fcpz0XTEKo z%?`}&Wbz+DzI68_HZjgc-&{Cd%@SN3tkxb_r0yA-LzP6fAx90P_wPZlC;P!U=GOl| zgrokKJFdKc!w}@f{s36QuuvZn@+z{=0QF)~dMbJSw6nM8VJGal&uw+ss9`z$-tr5! z7CZzG9#RIBzo>wT^o_P9C5_~iCK536H{z=qhddj%l*d-aE27tHnrYiD4oX|~_{aY9)#>A%-v4J&0@IzF zivII0K2VBW0>SOFndQ;xtze5o=8g@ASZ9)8nXua3LZ2a+cTzBy7ZlagD|co$S#)+o zhHiSgadt_C98_Q~J`&Q!)xB)?s=54w0*4#jT(Ts2TL8UE0l1F*mP?$P3~nLZ_s3** zTz*8w{s$Gs+iJu8yyW1-KGONT%<#KtR8>@rUJRLT;Z}A+b!X$)7MD-)`mUd1{(z4n zUV53%r?Y|ck`icK&c;#NTx(%Hy{<(cfFyb8DHK=DjzQ83Z>iVsE?YLLqMEn5XD-ig z{jWvh3@4OCBR-1hnnfv(`H^K+xA zR|Anuo!gOxs{JWfVMjS#Rp8)VnL}T(kj|C>sB4Fp8!b&ZFJqE0ji{{#6}CTZG`7A0 zkY8qAZg{Ct!LCyF?k&RKxp3?nLg3me*Mn4$+FR1NGkW$0wC>KmbV<~LedVb9TQcZA z`(VNQ%+O`-UCh?+fNYFh%mN%gKQ~6)ZI_{sy|!Wfxh2|I+>1NE+Dlt*>G&Eol{13t zuTNh5vKl{xB8KxmrRmr~_GpVTy7eAKHJ>1^@6Mo4J-(+8&64{|(>Xppw+JnsHePXN zFAoPTCLdmJvbYF*V_Br-NxTw!CTYTt1s((bC~;(!-T2*+rb{XwV5xu6}veMw&&5mQN&g{9l|4&4u#A_H%i_WHJ($cfhX25TcusKvhA zP5@E^eW_Wc{1639C1VGACyVaeh^Id9+v@~A(KvxuVtUnABTWwf)O)VJW6zgJREG|f zP|VjS|IJLFB!>YHU%k=+OSOad)oO8rqTcL|TKIM8z?~jnxR>v0J(Ur%R9s2UN-$n` z_M4ieGv4esCUcJei?g?iY6I%pKa0D230|zYyBBDSL-7*a-6cq|1ecZ;3dP$(ao6Bl zq=e!Y98z3^!{q(m|C(7N7c+Bp*2+b0l6}sWXaAl^IQ#DwhXqYh@o5=Uq4YmjW!=&t zRN1dTE*57k#zW z+CusLwb7i{XTNQIoY=+Z{ZxmAsLy9h$0E0A_$Kz)r(U_L&FVMuN21{IRMbdm%2ma$ z(06Y%Yp%B9KS>UljqLuoJl`4L?e;z0@FPZHL5xfHK6|0oi{H$1F-Cqn)j%=v9@hXc zjW6MRn}*PiODf4JeaO+CPeTTEVQEXO@wpFdlz03{je0Lvh|JCkIoMP=K(;sN8bp4^ZqLqhfS%^v;S#!5kvdNd{FTJ03jRKDrywW(o+MvoA^JI-T$Ko3hYOPJ2{7S@mn}gHK3dK-36*U z@-oC(xG9oWG>8aleg`TsXF@L z=RKFfOLvQ)h>p@z2o^5u0+F>fU-e!rPz)>Fvz~Q^GnN25F`6~whg38SP6r;A3Lg<~ z-sRt8R<`BCtYu=W4h)=T0ZRXLm=vE*MI;~3R<1V+=}!>g*dVtVHvs4Yn=<@?+(kR= zLzJhb+IZ@n-gzIr)AbEeMp!+`XB47fMxwg#;s=i2^;rFI@P_jk@K##(XGBr~!$7D_ zV>g59NtZlMxcND0jT}?wQYqg?a7U*Eb>9`(1{o?}ea23D(04GJN6_+XTCc~Pc$LBa zq$8g1xJ+&B$P9DE2&XIOvML(rLW{Ffe^{K&3%XXtr 
zMfgi5r6nI-!>K5oJekNZv*OFh5JrbXz8zw-%<+_F>h&wJUQb84L0MQYqXe^4Acal7 zi^_!-ry#dHJ|0C;x}ZEh#b%Kp3qLx=P;Bi`VVtA(H&XB~iaxADT{BN;_HC*0My$gSDJYL@1U*9ZIIi0VKnZk-nGNuRhjdOvTfeP z#0pJVrq!!x+m9gc`&M)Bp$+ah5@hf4JhN=814m~kh)Otr1^;2cnRPc;MlW=Y@m<)d zpi5T>zQ3hheD(yB(o6|SvDec|4K#$aGsuso%Jf^(0QV+Vi%cN2PxRN~s|r&|vVDC{ zn}?#H3LTy9Hb0lmT5GaO$JDQL-`Q`NSk1#b<2f%0egz+0C#-5FR)!XQ7TQNS6# zl|5>)Ns8!T`InFB2Pj8warRudS#(sRPfrLPO6ONuj9Q}>U})A>GVOgXO0%T<6$NU% zDnrA7t*jhgQ=-k=n#S)^wQ{=kyK2*V;gWSf3 zR6(3teUqbTJ8@Ufcf&n{DT3l=wKMLUr{w^f#>Z2{N6OF|N4D@_YXP6GaH`8X6@=(h zl=di|pO6Gsr|QjbowTdIaKwVVHuQnHGi1*Q8nJpA6mE+j9umM=*R>2eh@L1uxNC;- z-aOB}wRQS^PfE4gg|-hO+1Ot)T5kt=3~Xc^(lS1Ys&RB-R5u*2g(ahj95?iMx}x2C z@+x-|VU5M&E(39&4pGni;27hb$GBta!YxC^M2gMzF_+R>r9JMvX}ltF;6CX=*1Kd5 zlk@gg{JeKJAa@pYk4n{RcPf(;C#)~>7AbKS_E{}M2?rZS-;6TXPe~HIo@54`T;2Ot z1m3`=r@{1jUp8CX&@3O`B{|BEt;>HjjL(7df0(=V!E`HEy9i6hh0lD5yIUl?vZEOU z-jV&(jO+d0;)TqE*xI=iD zdV8TX9sZ`^DttQpRPd!dl&Q(sGx~xFblRPAX(BJcSiX}ww0A=>@7-_fj(XQM*oJeu zQ&`C6c=P&GIP;qKY4CuV;&`sqQi=kunf$N2>riu9SycbyyO#a_PaAf_EB?F`e+`GV zsZ_Xk(n}QEnGhQTrvoLH6#P0Sh`0nSlRJ$MOOEpd=hlevB+lBO*(dlETw+G!-H+US zcNTVO$ZUilk}HM}Hx`SN92X68_jKo79Pj7M{L5UKPT znlZY+Y^i{dTd%>m;CP{lN_S_<40XALgYBYsEI$An^}!QBg+1ys>5iu(HbfPVilqi$ z+g{nE?t8Sal?=kfA?-0=953s{n{m#d@{jXov&p?>+8J~HwM`gQjk$s-^DLCSi}z@A z>EFTU&@(Xt`}4DT?6N)fQLOzf!GGV!9IdNoN~6ZV?=;cQ0LUGun(7Shq0YiE)UK@L zfBKlOoc=oyMfEPXfg4XL|I@<^Z1CTo^;J5i`ZEGVyC7~E!3&8+fUWx=EaEP_Hf*+n ze#5wgZ6RDy7*VI^f;yr}IN|Pj>Dvy8Ig1E%+j*l1Oj`NrCW*&!h11>BY5aG%tHj?L)z=%1e+sAIpIZdyy3 ztIP@&L4%WrW*YvHJDya=T%C+zA*d&NP?RZRI#S%SXp9T1!w^dKx4HkrB%gQ3)w2uIt)T=|9rj|L{s_N z)XK6`^Xv>*Z9`2e@|Y4E32bXq6pxFX+%!`M#r_FZB=`JO6>9N#!~53}%;sG|Vxz2v z(#VjynUg{+G|0WeMojuKgRBV82fyP&Tbw!MSLnQ$I*TGhrt!*DS*Gaka&M}G_)V+KCn0asxT^ZNK~`GP4#p>O+KChM+z;6y8cVB z%I%h~wdW#bg=TKj4+2i>(|gRFHzvJ{KM2UBPAqcyUrE9yW>qb+^~+}f7q_>!(l%r- z(rUhJJa^itHz)yk&0y7$hq4>^sTdhiOAK%$F4asb#Iuq(GBPt&*o}+o>hQ61^Njy} z`I6B+mM4PngBHz|N)(ipJr8bem8~`&O^FpoVfxQZaKC1lg2%=PAKN&!^Q?X%Uks)& 
zOhoR2iRhU#9na$Df{ai^BOJuGR0lq{Hj_efyThZeU0-$B83o>Lb{XuQe$2)<;o*ba zG_tMn!cwpY8dBfL6DW`oyfV!1uHqalc$H#JXe@P?)#I9Hb%sRZo4rOc0H&F8Cs z$l?<>aXu46k+}Ct+cKY36RI7WPDw{~Pp2|huT&^CsgCB_PO@xX3x&fk2*^>bid&K2 zm-RCLC%CW4|7u;5l0Oan$HcGnKl2KGUW`cKh%)8DhmJ90V%lz=GHhaK_EV3X%QROE z1k5HZN-kggAQaky114sSUtp7ii3PA`tKk&5d?$IS7R2TmhvWB)NAF?euC!zO*ePRg z(&vEE4E}^KCUc}TG{}e0wu}V-o+_YMz*`+m8OqV~ZjlkiFTd{<{4GOj8yZcv&y;Z8 zEy#<9+s8M#nMO$#iKD_g*%rp&(-q1{SG}4@;;|HydfdvW&>1T6!cz`+s$`;QuOF=; z2Nm~^T#GXhz3OZ1V2GvapY%EF>s$L_`4DPLd0!mH)jHC0ayCH)eoTTvu=VHx=5m*V zII>D}a7u!I=(og?`lEzr!PyW7*Mo<`tbl1!mjziumjzu44_K?O0!w0jfd)Qs^3}4N zZJYTF|831hQ~r9#B!0eDfCGK#*N*C1<^ySr-lpu}SkEpgy#+@<6XE(muCI>=IdbNh-27wFmZw}dDCM3GNo`x?AFnRK$0gL=v(SeX!t(+|mR ztr#p4G&Uv)+6v-g1M@NKW$5qy zDGqBsRf+G~y=OOgCc>WR)IXBc_YUw(1YKqS?r8Sc-F&L?S*M(JeEFO|Jrt<-#Ebq5 zGbB1XIvC|tV3FWjAkMZ)nUTN?T+k@^To%Z!9r-c{@93}Xu5?1!@Pxox4T^}TT(#ES z)LDfl`s?}j@s7i8K}$i}keErO5en$x(^B)nJyXfFn+_m&K9Ak>^r=K!nR_(Kw0fu7 z_iT&%o2)nfP=r)6BFh^PwB92SzZoDjVoll)(mMCO`&6% zGG!s$#aAO}#+zl47~YrNDFbJ4+p&32ej5H~A+dlZ{c{$=VZk&>Gt z?cgnhWl$j_dIeG7rFlUx0Pe<>r5B~t zY12YX-{pY&W5;^QaYgqX~_kwgm(4r^dvSi)7c+n~irB317S6 z5U2YqD(h?ddYv`R7k@hO*rJTA6cG$TTk(N4SHJpB;8q{5)MP@d?BP~wd%5_c;!TBh zO=P3T>CAD80=@`pjPNw0qRu8dCcvM*ae-atM`S4U)=)mA4ODw(&4(V4(VbxtnO*j_ z|FFS&y6yegs=K0!3PDO}b$?wKy>*kJ&AodoZbv(n9;$u!?~7YdWy3jFT+N*s>7^A) zwe7k4lNVmoQ5hFjfl-?p$hK3cofO=F;jPoy=W8h58z8}UffU#5Fo?o>vx&kBBwqZR z91}7bTN0{ZSGWC5ct*>cD}~MDplZE+AcT&{rARj9#tdE93|>FzARNlR86)VeNom|S zV5^l+miqjq&wCu*ETtD5tK9xYuGwRqmhn>1R-ZZQ9&D>Q>~tMO7(G;@baIZ;S=Y)3 z`3<}P%(s#%DAbTI(lqrVceE+HA|zbo!$`&Q<+?fD%5V0#DZ{*&-pYql?gSnUiO5J; z^%Y^dy{Ak~{&zax5LAS5RyRaE57N4|t1y2x1N~Iv(u2vX_4R{l{g6%1810xwiYh)x zfSO~f-($1Z&qEe@&|*aUu899luB3NVbYD$P4T`{YJN;((1s&|y516an@ONAsoN0eG zZkVu=X;eL?v#;-YkZiPCrP3R3?&*w~@~PsAh6<+Tzvm6wrL$Bvj9fpJe(0VUDb*5H z2rytU5SUi$DV6JOqKtS?hc_pf!~VU+#Kc@r;K4rG{Ame%{o^YD;nQ87q5LbYW{7;) zp8T8a2?cP;n{>kC2_UfQM_PSw`&?l4mI@9%7euatC_PugFsoOPM8FkaT+bDMT26&e zh2>2Evs|K(fMdZOF=dD2554e&x;BqepCvuuDwaQxW`w@YiuWL@s)qhdcI5DIJDmt0 
zI0*vxp=#`$rJ<(%594m&@lx<-R)F6@2W({i=1t+$>!5wiPBlKmOMIygar6!c8W?B; z2bBwKZ&>QF%HExi{J%;DfO1~XCf5B2CEokbJR@ugVa6u5W0a*eCISrJ686Cg*;*ka z46*5bazasM!H^^rI0V~z^tHgnpq8?Z8+ynfXOa7gL&GtYLN#9GB=`VR7C;s)hGrdi zNu(4SL^udZ?zvv$>civx`_xnJ(8^i$uP1 zaT|vXO-&t5Wt;b`9BQaT6a8m$eu^`mNG-EF597*}Jp5)$#;w(!30rj~7V*2;W*1H) z6tt0e@>+p7-<9`d)os8+VBbo3ccTp?tZSqf?MRFu4ag-PJu)mRt|uVykd5iic}wc0 z!Hw?0*B6Czi1_!f2eeAN-c|lQk&&-o;0*u%qKAOktpqhsaePq}fy4HT0k&1}ec z?f%1k@B~u&+Km@-)u#VSVLH;eIW*C}ezW$aJ-}!pzOPKmw$>$p*XU0|kI#`4ad2)y zCNv=$=(4bx;Z>1E0sg#|L0XYblQ46xQVZ4`9dq4{wZ2>yL_VJe+ftbWWu64wdb?6OZyd`)A@Lt8R18}WxC=dxu@GP)rX|o>h z#pKPGh}02mW|qSCb$rEUA0oP=MZ=M&p~aZk@YVbJWJQ9Zc<*o8_0UzTPZWH(l&dh| zYMPtN{wuU@x`N8b7HDjb~(ND*Mf9maC_)q_b|8*OLj<2|iK z*Z`IBDH2TY@ge3=hktaV#+aLix%zE(tn639Txo_jzmWL_9}R0_X_vRUC<~>^HcCh7 zY}nHK#HMWFjBT^5@luDd#j=qCeWya*?7~?ZMcZ=7@?u=VB5}sM=Hjv{jD_vo;qrqh z*3=?^lfHP_4pm(+RjgwW4=1X_Kl(40ql?O@p4bQp{CfG4)*m0S0IKkf0wRGF2UH)b zb!RkcGH(qGJ)2D9JR~pA^}^vmATUMeq?+CP{xNv_c0sHp>9+a5j*Z8Tx4Vyg?H=dm zK8JI+<7`h~ESmpsX5SJG?n_2_?HVY~*Q8=+_A>!<39mI$WDqWjIffaHL)2GO$R=bn z&qc^2PTmJe>QC$GTWE@wLRbj;NrrrM@ZnmLqKGb;h{37UQ`OX43?6pJ_C8|VKaMC~@%<}G8Ifpx#1gGDdi^85K+6qK|>8dplIoO@|3^OgIHbk;h|lH2p7;yqJ-3;Nwc zP!f@s;PGo(c^>uu3cz2}+xV z%~+G0IJ%P0st3>!Qs_$5hn%vt_Daf5i7#wcjzs8RA3S(B-Pewu$1E@OFDH@xQ@eKK zQ$@fzVpdlphr0}@1YDW+l-C&emN7xajEMf0k`U4xMV>*~zmc_fA*4?>5};0uJl9bn z8)ve6&x*d5q4u=4Q(%RqBpy9=zwN`lw~0a^VD?A9wbolyr!Sxv&UIv8Uo^k@x*}^_ zmIr;*5h4V!`By<&VK<6?LNt&d)l*?f`emHZZUU=L{Db?btVmq7rI1XF6k)mKb#njS z4JL|k#_!JiclQ+w{~|pb7&uThRK6}{!)$X8w%)%Ggocj411z-D*6mhp-gLBj9w`5A z4=g+yErEHRs7^;b0LoBx;NaYq@Dqc;N_b`3RJ#+aIA6fK3x0AsIdm8OHZ@n~)Sa6aDT24v8-a#Dt}!3Dqv>zFuNe{%S03{yMp$`y03vh0NCf z=Ir@O$ACzBq3t6;WbApipw>_YM9Edr+gs`YtFeY!RMDnX#n3`##AQ*>a23x$k2B*& z8CcDmxy-h8U1wjm^Lg9A55sqzuBnp-eBd*$X=Xy$+wUTcvo%w9HHrty-Koh8!&Z)o zPB2D0pU0`E?z+EbQ1Fb1h#2C$p8F^n{si@TI9OJ)2n<-+x!PKsW0>}LoO2J?sCxMC zhyTT($G83Oy@UvskzEFP8(c6UU5rvRS5c4NfQ*wMHX&^cPxbc$pqT__bQNQiwF<-< z)9`|ru4nT(HW45p(wJmRh-4E?IQ088ex8ak`3;L+7AaX`jF7F62tVVMc5bYW!vw_T 
zD74|{k7CDD@{8~F;$gKYgI$_JIeU4xPbHhoOFJx0`9B!pSt1_zO~635ys#ZZz=J#n z7M6dhCt};f<)5lKNT5ynE(2qQ;isl2!mfR;x*ptAdsEuaw`}nH zH5$aYl%(HuoK9!`W^3xXl6hZnw>UpUmQ7AjuzY7_T; zq#OIq;wD9_;eaiKQ@0yk69iWH>v-)g-~bfOHH$!oKKt_L1uh_o%D1=nWh@}x@Vuxd z1SmSoOKNotR^6-#&;xwrH26|YBdcaXlxmG3$nS(Hu0=H1ITL@qhZMDwYb7fysgC`( zK(Bjt)KMyP&70i`VwiydmH$9e`|gnslJ^(x^R3~G)f?<twpvRFA5u@!?k|E@R23NyGe1j`Lu z3dI7SXqWTfa|^{lvqY8M{w{Ag{NCbSyp+OE*5CR^iBcyQyfns$7N)SLmU?;cQk0Tj z&u&hc59RqI(A8g@Di?3)I?Ki@i;eeQg=TAxdAJ(Ettm+p4>LApvHW~D| z%A!4OAVrBio!R(ftXV6%)^oYGgH{9|pL+ESSgn%UD3<)@EtMI*kFJwQ;fuMHkqz|e zFI0uU$}lTu1_V(~8T>ERG&Ben7Z)dIw62=8DGeJG9h{X-wdkj%Dq%YTig{Dqc2_$W zxCIjJ1p0TTu+j41#0f>!nU0Fog}e!+>NnLJ@ zS>RB=8>&D%7@oU_Vqjn}9QRvlY+OJ5r=0j-PU)-v5BBNw|JSC&W2~PvpIY+sDC<64 zBykUlFVRxn=0eKf75c0S`f&p={O(YOlns&^lL?Uklh8*-V&GV>%|r*Q9)C7|AN!0c zqCZB>#^xPrkJkaLww60DDXN*1lJ{@yoTk9>l=Xr)3zBDAKPe)CHQ=l7<>w-d)XD}o z7V2fu(x!N`=*3+S^?t<4DVU)y>WnON0_kaZHS2w7uRQEP92}^EKqT4OZF=Ub$z_X- z8OHv_rBI64ABS9A&Z^$Q^kwBsF(b=^#G*5etP^IqKl(oytmeNOpKzI;$JaBTiJ{(` zS|)DPOjfZBja9+v8u*p3-D@LU2yLM}bY}o00q!0-q4$6HgAj#|C$PSWwXb2-YuDb~ z+2(DqnT6=1D#m#?togcqzM!!XCV5xNlNGzjIcWmQY*yLR5gj*Ef@4?06S!T}u^Prv z#8ko$(DunnHby>QEAFCRTw0C>SwlS_a!d*IlB}luBcE~i)lXs6y%77s9Pj!|&+#li z=7DvpCwJ_w3md5Fd10U!+jR;@-VA*}AOY*6_5yUQf@prFpTS#4M)}zG@Zk`(;Yn@X zEXI70@;0V+`{b@2wM0skms2L>qKbzeD9Up5&!ggfCvnacg||frdX+`S2Q6m`$pZ@s zPgJrH-;=N~Y4Qi^=YY~nCUqBW34Z5@wNlb2b^RUr+E!cZ%oeG>C#t~vZD(R^@gMwk z&e7(xu1AxPfEfFF6tK~T3A0+$I|@#VcJXs)J$rzn#`$;9`*u;roJ^fwW#poDB;yHf z09BMMjy3SYNw$-hado*3w}f%=**`XVt578NyYMsG=ba#oWQ9)1NVR3;O(mot{2?6L z=18zKN0>LGLD%VOz-e`}>@C|A*=MmcKwL7BWV=h3c(Fyh4E2vjWeU>&lPRo*alVVw z&XK0d7;^po78ngc31b32p)WXKeJ3w0lGyaa#(~hD_RUd_6>h{g>OhlD2Ip2|OH&!nyXcnA z^?HaF78hf5bcCTyGpQ+yvsQMq%@=hCkG`6LhMfH@DkI5ED5 zvazxP{(r~3Jc=3?6inC`m#!k{cdh$g&I>pHdiQL!x_8?c_T3F5C|1C*P;Y;KJC(G{ zijYwf7X$nK_?*SSE#@7h8}uEg2m}4(s+9JgoFZ|yMe;)PYeHVF?W9})pv_@CHbpL6 zOq_TwR@KVPigp7vmt>Kpenaum>tXOP=)QLMWMEhQWTn|>zH%EH($x8jCYdh`>eJj< zNuQkj&Uz{KR;_7yxo`HesH2#KA-4muccdDq!zALm;6q~Wag0zUSklqZkT5gHBQ~HC 
zT&ns(kgS!PW;b%W!{cc)b_d^zZGG!IBf`g6MgsyzA1#YngLY;b(7`~ps z3pSBUPBE+g)YtCfeY?^W_syp3?F;x_l~O3N;IoLbosJ)rk?lNd5x(5MQul#Itk+F0 zC`Q$XRjnTYWJsk%FDN409F!VNE87G~AQEm%A;pF%tBgtga+p#!bNW z*i{uF$A{ZiOgu>vz!Ic`q3l_u#$usEdcXe0*al#zJ{%53EAbtinz@mksV-IDSW zL^I$E1C953n@9){m+z*aQvXR4ahd0+F=Dzr8^HNZVU~m#t#1kDw@bT-u}vhE>;TDm z$+b<-yQt3a*SUMCs&CL<=QGCr5K*adGom#bOX)t7A~Q)R=l%eXtu@IMC*G$?9s>IX zpghjByBjq)ZO;E z$CjxG6zrh@9bpK6%FqUqg*&F$)8wJkp0MV%oyPN8hn}E{Y>AW(WDxHg7K9Ar=uVRD z5Ni3-amX4Er_=VdN5)f9ZRQ8tY3i}>91gKrj17Xb^aiS|Y zNFe-Xn}*o!x(uwtdG;RGNp)w(YvCiHf*c|*#jebkN_d9%zq3;-*vECIxm3Dvj|Bj zy0}87;XUO+!T;%<(6@U^qddxQb}p1RN>A8f<@|`4_zoS+5dY^`qA#P`cY9IoL{t^; z)7h_r!5|p!d$Decr-#+d(}a37 zWN6PhuGb?TkrJq2r(N%1!37+S`iB0_IMg7OloG;=UI61Y_^K&{jYD?wlnqPiCuPLrO1C6MJmK-+c#)qrYy_|@?+A@ z9owSvCs9B$hB|f!cqC+}?UL^>}KC-&ak< zt~SYH9u$0ACcj?k4mH5K=W4U&RF%l6-xdzyHDY%1CB6^HV6!sjgNoCzq&4<5&#+w_ zImE22B%pLZq;1VaN~le2Z%>Lqgxol8XF1(th3B+NF74ebiKQyK@FiE$ZU9t_Dg8>X zLItHNaPEP+GWGi>{>qjho74Z|z9&f>JV(Bz72h`^WgF`PH^mGoP7`M%B>1Q-82FY{ zp99BgmeTS_>eIa-kGH6UNXd&^D-TvQO9cqlNjnO2tzUwLLfA9&=F@?x>U$SD*=lTv z4KMyPYc#1a7bQk%h^%d;2Y7J{uG;MJ=Ih+Zt1QLo=BmH-%F-UaXi!k8#J=oTrx4r0Pz1yQT#wa|mA z%@Z1bQPlGDG47kR2duxU$WtII-_cro7WA*8R+pQ2Q_m!Yf8*G;XD{``xi4j!%L)QNySP zQ!CuH?`Z7cVq8`VYRLC5EdXJ0Q78 zK-(4)T(^jAmTF;y{XjGjZY)3&5^?|NYTcTfYeDI{aicusxycR>&@Zfd7eDkXG05bX zu5nra=CqpZw|AvHh8y0vEHP=Ek@BeX6*>vCsF-xm;vhxkQdx1BI)kVuoIJ&7k#=o~ z?yf|7Gc)g2Qh1cWh0nY|qlDkf-wUiZq^)dBMpU3nFNO8K+_Z<@#Qf;kf z@|a)Tl;NT8ciQhZStVsmIU1h$Gc{pnA~4dfU(MTyF*-Ka~jMb zOy+ONOk4CZEhj;KiEovghcWF2ez0=$68|m~dX2X^!O51~Vf?cEdm8PlYjE%Q2o@uf z@g(3Pw)&HR?7{N7^*%Pe2oAzEr`Kcx@a1QoAZ`$W*>Bf^&rNh~rZNRN?G)H@NTuqb z@Iv-#;6$|GumWZ@P(3DXu3!QWaV3xU04%W*Tq7iejI=uo(uLrusqiZoyOPl#eRNkY}%$Nnqb7o<1G9?kT`vllO;bg=d4bqoEf+vK?%z1eVAlW#Vdf z*BJ{eDL7DJ*M3Yh*1xNOp3XtsGohF#cpDt=5!N%jFidg54?}uLd7kO+GA+83uB2`dpOBoN&6d58)plP!X!n9 zJ`~noPICMGrpPjz>9z#RN-^Xae%p<$7nl%>?(sFQkKJ9QkX=&GTbm+O_)!Dqn~88p+*D3`SdC)wQ?(&3uhDhIplU zs=0Zhm-Q|`VBRQZ^x*U;uY+r;14lhEVC^h6QMqfKgz+8M9@_E-hqAJ 
z(PFIuuHV%moM+lYJUMqM&`NzWz|C=r*JeuKiS*ti!-+I#B|UjS9{cuE%7sh8zAND_Dh7tfs~)J(@eYLoKYdxrAN3yQ*O@7#ML;h z7P=hPtVM%^cYpoK~J|{EV@x(2)Uz50FL# z97kdJw)ZGj5{b5z_RajkJ2Yz~JEEMpJ93|`pu3mtC@7pI6X=*HthHzm9P1@-0uKuQ zosLY*B>?6iS*liB;z8S2-kZ{%KSuB7Is_Uhl)V2-8{RIv6bvSzG(ZA-6mBZ^AqKCaiAibHxuFQX-;D)J3YrIUX- zV?5N4u3SHhunW5!FEF+)$BZ~XbB(z(Wu0CHSU)=8o{Ol)RSVex)zX6L zSz#I^XnD|vSp18R5NHpu4cET<9Dg$vh>d$XtxgLfv`>|NmU1Sh#GB&~S`h2y+uWeb zc(KSP*PHIH;V7BZ4%7ZDDz z+fZJZOpVXwbLf*d3z(28F|Fpclaq@fxOL?3^t9dPpzTNocW?4a)%(_>th0#Gg$1pk zr+8U}1xkB9S2U}>hrG-7n7q8(jic2&_0}1b+|ourZ@GeU$(AlWOnn=b&$Gagfjd zjd%C{zBm69?+{XD++cD91?qeeigFcG&TofnGdfMSQAh{Cz{SM`KS?fza3=!h<>7=m z>Sk*fncRYVFnkMXfAFVd4C&@$F26ZgnY{*TDoHK=lR60X6_Z851b%MS*cucX7yEM0 zy6)S^2ZpMwo+e#e0*~cKMn=5v#65qHGClP)x(0gPobi2AFpjd!yyv}0A2nYF2Vt{j zL0X}Sn8(a9cbII;610v3l!_Nj$&pJUnhxd_QRBjbn(9S8d0g0F6|Yv+p6@@E;G z)8F8pokXT4%0w-PuJ0oFV>-h<p1C2DS0v-J#-4hIOMGdb)OIl8pf@GrZ6gRyZF zrL&~R5hTC*ZdpUhiUYc38%_+ITZ`B2Vn!m+NMszXL#*Y6uiJxq7JBh!IgGqSn<4Fd zo_Ysp5=f_sM^76oJau~GmR8Ijt}r>jc6TbX%f0P`C^XOCImU$1z>1L3Xkg!CiS>Ig z;=$-YJx;hJo&r5~{r0pZ@+6f>53t`-xGQ5eeuN8z9S638h>3RJwH4|28CTmfaSsuK z^x^Y{S;=jG2HWwY3yrO$Z^61vpN_kaE2(75ebC!vg}y4Q|J>(fvuNA{+#xk6B+} zp-u05DM45aQe##apjiisBlbKJu%any4s+1~`A-~gzl0_&MTDkwW&TnBt>f$U?~X*b z?Lg!Gy)&bB=o4NFpWWp+;G3Bsj&ZYxF2GQSrtP}wXfsuTI;tr&QjNY z1#XQsw9n$vCK& z>fS)|M8UDnpE!A&H8~6_;&%xcx{@=4mi`;UCHx!KKmO;*HlRQC`fqB_5<{w-hk4tN zI`KL}F3E3m8>c1I&Z*rC9IXWRetYv|(q?8g zLPF}JnPvr4AZ-l;gCMuX25z%vm*h2=6F}O>2$8+MNEDDB(STVZaB@U*oeVqLyiEqR z=$}cnuPH>GvJEo7RB^!hI6#$5LhGV8-Q$iClbyaKP;LqQ04tLaW`2jL>X}#|NTHq;oc&9tJkRw+TG$IkC!?p0s_h%Hy0oM} zMShn!v7RFg9cQ%6ktFNz1+mIWX*r1jD9EA>y`-O8!A4Cm| zy>G-)+z8xg;7en3m69tNaVE;2CnK=@?eOm}14-|-0@!DaJcjoLaxSBX6Mh1t_jLV&uJh#s1tU&6cpc_``_|= zkCoBr<@D#5r;?3iB>#Q20m`^6TW<<mSD#-Wv=RY+|MGtGpUf@z~_ASWt^)vhH>huT$lk z5~kjVD6$<2u75o-DR{c{)mDF?ikc=l`byZkl{1mL z(dPU4aNp&^6oe6jSsaQGW^3cwfIU$3`w)eQoe&z*x380~7R$ zTev--Zw)OvjWVSyP7E8;=0DIL&mDI{Vl!nsWi_>aPC-SBFTg2*4Bt`C?~)B| z5*CmCluJ>O#Jg^@jjK*zK3{w0jRcqE^1QDEto=jqW5PO-z$_7vJUsiBYOwQ63aovP 
zR?HEe`ukJY;C^*z(!XjbM91=pcC`ydquw3w84UcniO4*!IWLdRy8I(d%kB==PBo~Z z^=44R5altu0g!o=I z*tstsr-Si8j|(XR(%$(oG}|8OCDQyFUN<*~>HSFkI>VGr$^>55oC7PHBWgW8LZq+7 z=ixC9>zt`|QCeQ;kd(U3$ViINssuL^wOZ58u%X+YvkLJm;BN+BCJfHsza|{9xyvbG z=jaF7JjkecUnfd<{aae#G8Z;>TDZF^i*4x$VL7S<#IsD&9={OR~syAQ#%44ukpUg`BKx&pP;s- zdn4uUV!ZgI1>diqjR@rStr!1LR_LH7`C>aYc{lG< zgl?W9*FCu5W5X4yOFDQCwD=}fE9*(OcI>W{UEV(b_(?x4e--C&u_3y% zNuD=Ja}v6sftc@<%NBJ;XVBt9n$Z`sduD3q#td<2WfyUS4$qIDdYj}ph4w$On>0UA zhTrU^)#qt*dNqxicEE$muSfb8 zvOeJYC2A`vg~)^LE>)kVkA@R8)hax>Hqe#VtsIDWY$>kWf|#dAN!I0|pQ{PO{~RgT zxHwzkyQ+vtn$P!4{lV)z=zub;oSLZ-KPJQvEENwH>r8ZBA7(0`HsUD9zMZ1Mv!y9~ zcf(UuQP^h72}arhb7Wfx8*GYR??BErs(0R2@Vy&@m8%OHjfQ#QG0>5oKC8xm+hrR! z5XZ6U+iY6v9Rc4@tp-2qLfdER6max;Ds4CDQ}SM{f4#c(Vf$w7`lj*e9He^fZVqBK zIc;rnI{E6rRFGhZI_!h*YFM0W;_liXfpf2`2?1}%8D(Dc6P~0H#^``SJzAs;LC112 z?5zOUsJRbz=#8#+AO$a*D_uiJg|3pwHfh@nrVk;>s9(972y*JQn@{@Om6+!VQH(2)1Z&7aAzjIBOL1AaO2mHbwv<9C8XO7~0>O#| zcZyeVFD74xV}6WSV^rCmOy0X z_kwcEE)K$3$D~6`mQZdr zPe2b6ZXf$A=DzXqVQxxfi}fM9khlV)kq*fFVLRQ7yg#1f{AT1aRnzj>?0fEOcX{Ws z8<5w;bx`Z19~UIXcP5z!YhVNZ8ravT^CZ_>&a?G8XbyPAh=eHRyH09oSCpFS=(JwX^Vp}g)C#xu3Qza}UT z+>_6niEskfT6L8b4*CV-#rk{<1R0w|cXeE;!75C5RgVMa%pa5+c) z|G+MGNWES4Lo$Gs%#{hUq&?LzYy6hq?SfLjONK>+DXQxNcV^x_v8&M%5)%r66%N_Y z6vV*=QMCLc4f%{_kc%%aIO3q2Lx=H3el<;RmNC@jU6w)fcs33*+KYXs@t-v#CCZnc zJGrO^l4?=WyirvgHCTia{pQ=f2xJ?#jcOq~7bmmak_gh8_+uzLP9l@Z;!s7q%VMR zmJ6-CxeIg$4ghZ%ZzcnAQCZh)1uwFKZ?E@ANO|ZZ47y*oeas0!G}dGs49)CWS+rX` z=;}nwO*}>CNc>j7bgUg;xZ3ebDhelq zLzu!_LPF*4Ew-&eE!?OlnicK?*We-JEb)k(Dz$~l4dwlrSj#j2ZVJ8(al(v(gwFRJ z8}&zs!ulJ`t;^QssZNWZ{BCJ#!s=cC{$(uxhz62mr<5RX@>(+XA}5q>uQBiREj^%- zu;foDnyHy;ljt%RTU%FpZn(q7J54(xv=rfDK@@wDty~zzKeFe_WV%#GqOeu47wMdk z?~?W^WF)(cL>{aYm{LmX@lgZLi+s~Cu{kJT>d z+0ylj%hKR)pt)j=5m?9AETxeWaw}q1(rcyLHEH|8lr8t{uTtgsa~MkMmDOV3sdG3otCkxvHK}#H|aLiT0Oy1 zopR5*P6M62INY`MAk^5bjFYb|LM#JWU2Aoj%#ZAL#Lz;fs)=+fTauX6jh2R~IaeM4D2@2w477wj3tL67K+F^3tI2RASuN7P`?jpFEmucV)i0{QMt$ZDWKJ9vzf zLtdUSoskg`-Uy$5hbHx{d*ZPta?hOq3Xkh$jzMwwI^e%=&?YkCA8m0}m+Kkms`q$0 
zpnt0}r)M=0nu_cTFgAK1U1n@Fx&2=A5pD@R&qlZoGX)cyZy;=Gz(KZ;8v= zY`(5&sZPeqqkkJQ3$`F%6Gmi+IpT|n%i+lRBaICa+LLz#YU2|dyg88Lba_{5Z^?IE zs+J^#+_8G4grj=B;Z(=yz$*s!dJ7h;rdN2@t}Rzldz&1s_hpg zmX7a83c$d>nOkVL7&PJV{~LG1i~?YWcK@E%lA}hq~NA;3#-nA=3drk-t=vgx;b);mFq`~?Fche$7E97_V{OlrfoaWF6?0G z`V{sZ(WA@U0!oi|23Aun6d)!(Wos)!q}g`y38^~FI8-X44}?`S%cPdB`4gEs!N=0< zaa*xA zrf`qyLdD8$UPE)6E*vzvdv-nUmwC4N!o>WXKR)o7JboxGwsKy+rzVF)r}2n)RwnS3 z>uWiG^wQ;2?t~F2THxX*1=u?V^=>PC7dTNeAGGx?5No}PAmKhDwGdQDmg>5Bp~(sj#A+_@&Jsn8RIQ{A|zKGN61B z8zh$XV`+uTM<&qmAx2!G?Cg(;r>Bj_@0psF%v6WR=Slg6SPR>%;1(MKI^6zGHumjU=^G$TLD^?)+OtP!YjhxlU%O5pP1T`l+d8HE! zFvjU}VKMcx5yDtabacu!Vzg(10YvM$@8zbPn=dn4vSZngn)xso%D-ojdJQSHV;#ts z%P{9Xv3Ig31yhMwo*UQN-=*?b&Kn>lU9@*5vm*h9!Ejke#sQBd%+HAVKl{+(ww(D- zQ}~HU(}jmt?z;}FP@g>Op({>qeoQe>*4ur%q+DL<#TY-78MV9z5C15LLX>p5yRKp+ zT-L6np19(_*B zMh;hSUDZNq-Vn=ealfvug?|Jx-IsMbm?>&@-H(xQJt(|gT)Fac`MLk%^Nl!*#LXE# zV0TuZ$>=4^f$%fNvJ+-$qIPsh*_!oO1Q`~E!aq@ATxH9T_5H-~gKLTBr1pU|PIGX) zy|^>Q$GIg&{Q)L1{!O0NdqbvY??#D#S-u_@Wa00L(=BvqIinu}Nrk=v4_QrQKJ4vd zx#!O7d*U;y#Ex{rc`jr3b@ixA9Yeb?vPi|<8faeeIZRfjbKUGeFczw^#s4~a95_H) z|2lcFUfSMIPjV!~DA+qL*H_z%jAu@i(9_?nxxQl-QuGKs*-!;~ywz`~lnC9yRXN}q z@2B=@GkN}wsf=DqTs59b<-9jtW=8OE!N9n>i`8#eX?4_Vrt&r;LMpP;L?dXz!**)L+h82 zfV#!SdS^l;u*zdEeRl0oxkzo0exZQUmW@A|6I;|f(YlbxI2^ys*@g8bfg_TEP(u&| zb3C&2P~3dbTxtDe$I>AeKmZyto+iRzbNmUx;$$37rj{@mHkanlL~u)ixX08mEZtaW zT|#~ z`R+~Q9c)S6m{|E%<-$u#`eXUHq0y#X%0NPxRAKJ?mTWA+e30iUMui>iuKP=encg!#@##)^oB( zw*4a6h(oNCcEggqH`YT$W2t$`(2 zby91|`je=c(~K8&$nSGZYloWZNR#IvW3gE?KDf@ghw(%tFg~*RIW1obsqHrhtEEX~ zixFxLXOG*7;N2#atx~f#-Hxm#K`aABK|}IumV9=O0;DFsR~L z$7g6{T;<+$1!PGY|3WssCE3v}$SjBdqOF4^fTl5xb;XBO{`5ak3fPc`BYxvgEOO!h zEt09ya(?*ydyQj4yweQv!>ic>X6fp;ydJ|No8N5hCyrFB9mbh#@l6-3zLA+5nD)vZ zQ7rr*q4VA8qevlWgP(=_KNojys)b1J$Pi16znyv(uqar)=`>Ev?BhkTnLaW)5nj>4 zrgmalpx^y zCv>3a&SR**zBJ*{gw-VtFX{~rFY1c~a_$ZrB#`7-i25E%=p+!8E|Gby@OzAK6ve4j zWafi(47N!ldlkC#0kjqsOBmoTPG_UmKrHdLKX#4ZS1vIH{g4=TE%Y6HC&H#rNLGek zxyptCColDn1mmZR9;?GUW{2foR=3^{`j|c}F9+>K+F0KDesVbreabxtH^wT1 
zS$~zmkpd^*#jC?rI&_memo*Cj_bEIEuaZsNxi#_mNvg%0;zI1%ih?leU|WL-ID|6iHx ze-l^y@adX3amI)wOWH#va>6OhrS2UjWu$T{#(oseKw3S%;9L~Ptzg}G#>hTBd1q7} zKgud0PyWLG=9PQ31>MOGvYuJSQzb>fG&e8rt`k2$+TKxF{W%aOjm^wSN}=^_(JjkF+vUR96hX-4_N;g4pYF48_T^;x{7pB?E_x&Pt=Shyrm?qM>}$~VauGv3OBd` zrOqxjUfqj`!g7aWhx#d*wkOJTMfy&(&e`J7UD?@XiHBD2>d&~l!xC zcv0F2+0l)>-!6Z5-_wmM>y z5050a9k*~LNPs71pi~-k%<%8~I=Q2G3Mc*T#b34kP;%H3pdtIwg&ei5kEyv7vV*tg z{X%N03R#Q&)ivE}y2_*uB!%8cmIfRP>|P1cb&aWY2ySwD#G?SAC^}0~Y+)EYgWON< zjE!=`?cOd7(kgq(g(Idyj2^1opGM!5`<;{XbE?^8EL&k&JDY&cr1oBZu4xkgO2`l~RpCBz^l z&>c=TH_mp~AeZotBp4S#8-Z-yVW zAYqC5Shu35|GDnNmrH#22UzX-69JTjVqR$2AFjce&;%fS;An31R=jqcni#k2Yhts%tX#4 z8Kq1Bu)J-#mh@SAfPIZ;JXKHp@}0}uDG@uDE2J=!oS&inuYT~(p#Y2b_x{2P$kgM3 z+r#IY>7qnwX=yGVE>AMV4;5_m^s7=v8uLN1O!<@EVr;rr*}!vgckHK> zU#v98>?Z>I2#tL)wJ)t~AB`U3(SLEwz1!GFO1V(X8) zio*HQW{LpMSGu7eN?S&ZC#JjaD)+!6yw?4&?~DWqjgCdeH{w@5ZnEwv4OI$qw1hmG z4H|6!Oy_Ai4WZd8`}&99`dBb2o8VXTg)T zO*Rk9_LNY9dUy;G07NL$KqO)AUgbr74=PUz@H{{KQL(ymR}dRpEr1d_){??=%m9uA z_502binA7+w!*jt1DR@sx>y+Ar&>TG??yy;_s`++MxJ)ZOY|&F8sM|NFeedF_AB?a zQ7}2T60aRf4U%Z=wtk{5&iU}D>!sW$ZCNLJf!6EC@me_%zXZ#IR&#krXd3M#0dw+n zma_Z!(zwt_+uv-dqz%MX6%yf(S#Blxy60>Y>MW!d1^Bp7p*x>1%QL*`OdGR9Z2Mg4 zkE8%4#fXs&*ZZ7^(tO8LzoTFKssR!&k2Kz?pT4$R@pz}AUy1Yh)AlHTJMCN9v$!fX zx7dgGzYI{&SwJfkiVu;8r#H6l#}wu0sLR>R}m0T4S?5^It?zLwtn7j8lwvAwn<=v z$4>$(=`IQnX`AKgX9r(`jw%_m`4Oqvmh>qs?^Ar$my-exylVo#!11|3MsYj9Na1Fy zsiQezp&`0&lVv>dd`dSmCDilu3;svWb8t|T7o_9FDPzS{oZI-Q)Vv3LREjv+ktz3` zbk39vxF>@J#%`xoDNU02BHbg&nL1niFR`Fpc_t}8`V0x@``x#&_It4mBKVehc^!4a zKf@b~)5Uy=9I|Q_*Jxsc5oTS`=aWmDHLtp6uO&d#bnGSvX|Jj@?Wh-jY)qwun6WFu zTmM^1BJ~gm6=}FNFAVE)q3{_eK(a*z*ZEzf?&Q@oUB%}<&DD?HU+yU=gP z4O-?wNgb7SaqGP~goW-6LFfbRyht_r6xHmRUfn}1ym2DnV0J?PaC}k}BmGC_oo)Z0 z#>0CNf)W7FodG@y^cs)HZ5#Ib6W~3YO!J{H{?$D;IU35A^HRiJ5gJIs2e5#hgXI=DM;Fa%Q?Sc*OC2xMchGYs;vO%ujiTpu8 zeN0_<5x=-E*GxZO9|_wgz({IM>vGHNKx^ky*1o6SCVpEqko03ICRkRGV!K!>DLmM- z1sv7_^XYYz--WUv^^)-bzCEajsrs4nfaCa)7oo5v;Z)KV{6Q>3?YiO@a6Gq^E+Ey< 
z7tHrM)h6vdQJ#mqJo@FPzDEi;((+Od_({@OD8*h_gyR-IGD*JiUR)!t&gxD4XZz<5 zR1I0#{xDUr*+wO&vGcVN=G{a@?Te_TCa@_PgXq8ayLSUmJ#l|~LVr4-;JqRh_+wTgL4ZbQ|Mh*LQmCxjX?#!A zmA9tvoARaa>?^I9g?5~3oAsBV`kV57;PzAn(WRGD_wG(c1&nM@*DDMDW^Y?u`R(u;JhlIbcL(awSsH!460?ZDV6TJQVs5?75pXlmh@+?veW=^se zSJo2PILzBo*Im&KK5n~ogF!!*;NYt4lw1S&z*kpFsLX%*Fd&0Z4D_(MBXQo6|Ne#r zkg~nJ=f6au6xH!D+`N5QhvlZUo6tizWMGg!FPk%JUraT#mFJ^Pfd-MVGOS+eaR*-_ zjdSoeA(e;SI7bIXzzL)Mg_W)-bb`rgx|+r~@ZH8NUY5)|T5Kju9`sFX^R``yfCGIB zFlu+Sl<-|mSm=8mh$cTJBYkxJz6LT4+&<7oZ>zRuSnDPy4xPg@2ME${ZMGZEYoL21 z*u?MJ4VfyI2eLw@>T4lP1SF1<8DQ(xXuVV^C;Sfodkf18A=-fq&M0_zJ5FTVVBg)G zfHR^K7RfsXVD-++NcIT{dbUzCLdE46f-E$|7e`1cXWIomw&NzWx!(T znc(qY!`@0iR^ddGx9wc&lgsI(()Pu^|5x(4yE83Z6dBr4JUn+nG)9?wdmt`tj|yk8-d3h zz{mHT>f>hLhk)+VBN24hO}=^NrCb-Q^?A@*-a`@7kr=&zMiJOoDYn8Q=E_%)4<8^g z&QJ9wbklS4e>D%4(m4ab7aG^2>Vgub9;w&C>Hp;&{4xhQa`oTafw@5EHx*hZ0+ zTO=eG^C53|%FgaM>g#I|mMeZ4Apg~Vf@a(d`Ygod5#y@rnF*|tT)9zL-w*m3{0Eb4-Olm2j!~<8FY3-U!Vy{#Of= zTk2R_u}Ua$z{L%3myT+0;NL7U%k>F;rL&96moXKAleA2;&3FW!(4^DxCVkYGlv_Xh zOHQWg3I<=FqWKbFMc|Ay1boRQpM#<7j%A8R_qw1fF~)=1b*lXnLP8a)Ca>_#GA_hD zcc#j`Pj3~H6w5Y9spZ6el;RS@-sAqSsTLEY9(q_f_Ha`XkR=D3m!egZ0<~R{CNr>X-u<(87RY(tniP;OGX>4 z3DBN8kw3E8wg*cV7tztPrE?3bXnX}b=vjkz`+OHrULaf2bxG7#GYdbE3xF%>TYi2T zA$U18*3H11T-F`A2M4f-XE!B@nD8;{NY+uL2$~0%H34RwvcfV*_-~wr{g~&%GpZV< zr3g)h_A`po(3>Qde0$21Lj(7<#65*Z#zK<|IR7+xP=-KU4XDLp1Y5If$vwns!rEqn z1!LlHWOqy#Z1?yy$07)N)!>)-j%tkaWxOYSXYz7Y*d3;HF>r!rLe3WW-Q)wE=n#+XnPyYh*OZ6z4Zf!W#@r#O)#riT!E#`2 zxEC+^&K&2g&097a=RBXmYS4uP7Z}W1quVBN-Eu$w<@7y8-7; z4NF{L#VL>$k7S2-{YC6HLSQDA^1|u0dkz|^D5R;z{x@-!mb= zw3eZ<_F$VXBMuo9trU?sI>{y!^=ks10tpZ15hrw=q{2qRT|Uj{7QgG)|Lc9zhWmz+ zGfRJQFBI6W2;IL;#^y?@R?#h%jMK_k&r~gf*-@ob^2FPB`cL?^EynP$h0vcg`-9Q% z)=XuppE&BE-X%5w0>i zzSvj`OM6nFu1Ke9&*I82cl4)u;E8RrONDFOsCAF<`m)r1rMp^g5nYI^KgIS22@8P% zGR>%c`j3fW_K-uCg|h?kb9WK?4K%StJe7HytjQroDjjsN_*e2gHj^?Sez(K-;Lh~q z6pn!YiHm4O$fqAPvwTjJ3oCciVhAdA$I9!}#RYt)+wr?HrLyo-Y|I7gK&dul(*WcgB%?js6MrvO-Hs!sHCxN}e 
zOf(NEp=U%8cjdxHCKU<&MVhAbz?DvKNanv9KF7+W*rmL(hf5v&6m+)n^%T6F1T&SD z%6^IbOExD_Nm8tw+SmeiD?wovJ`W?Lzc_v+`E6zL0kjocjEmI+n>b{qaEy3%d!HSu z&|an~Ov-h|ByqvYd$M^@=&ap@o3wWNROX0wDN6d^a`zAzE1}Al?A<3X5of^+^b*`8 zf`DJ`yvptehx<&i!0Z@T~1D4@r*NBfLh! zsX88}^0T3z3^NM}gS)GUI#qHZaGe&Zd{JrCnz{+p=@wtLy8iRTV%$d^D$#fT^(5jCr7sm5+F zJI4@cis+>GX|?Jpwc|?RbRM*kpSW?#(xp-EmC_(Dc=J){3lxh}BsOL&Q;BgV@t%jn zztIC|)3IbXI92XsTpeb-m0Uiyfy;a>aQ0S-wZrKsZ>m8AMf#_ca7*7XYl4A6%R$?5 zlAL+Ew!JS;4ZZj!LThKbO|uzCzu5u6Q4>EhHgf+0JwEv-qNS0xasSkDoY86Ovd(VV z6EZ`dg>-L01*Gn8+AOL$Z#ra+RJ@S!yVyuUkLK4=$YX$o2f%^@d5@hz{>7aIcHCw^ z6Zt(q7>Hb}<4m?(U!d$vsi1agE2!>u?PHI9~`4_K~j7Glqi|XqP5a)LDmr5sYR@TLnBL92(Au zmI(|9W$BJncIox_8cEvG6QG5ZhjTq!8|?n9)7ln0@8R_4U1~fWKQwRB;_(uvX2e{W z@u9PamOS_8_Mqoj-eOb>6+l=Z---J%kP<($5M9Cdg%e)iq*xsH@gw1$fbLLeyWapa zu9iupqQ9Kb_c|`(L%A@<)Zu_3ce{sxfXlVUFJAGyhgF%0DLb**M)BzEji5|t+3h!% zMn7>Lf86=m=x&j8-!6Q`koSS=3@cj){sARc?W@JiY*~H-{9AG1F21-isveo*VMI><$@RYI8 zak5kozxny?8Krrk7=R;!EUYO5Xk6i5F8N00yuZ!aL zJ3_b9cnzhdYc5;>L)QI#Z;DLcj1*;Ni`y(GFTxWyD>rBcKi1Kb3#teQYB``BDNxUK z{-5U8|0aao;a9i+JnXD0Mn6r!sv|a$-Z6MZ^0~*0YBjaWW*3f<-5)2tFRPSqN$p7c z+`1F_)W7x^=J{Y!e&RLDYTD2I6q~Mj%TfJj)f})kCocM?>_8nA$qxt_m#(I3R6zf! 
z;HVZ8&CM?*$8kEjL7klK9BVM8*@Rn!k{>44n)-IiFf7e#KBry-TisA_ZF|oE6yiLT zL&|WvRWq!k3Z*@!>mo}&&~B58TILnv;E*|qo01uBI^+jdi;;?nTK+7-q2hOgziO+wmDqrK?C% zFT$IZ2vLn_gW$V-}V8^R@LQgeYm%8fEvMj2rZDuxX5~bm^m0ki%nt%g4Hl{5vb1#ocmsPl z2AhEa%)%5bvFU*8b^}Yved41F(~w1M_1HIAYLWYMBLniJS*XJB-#`~5RG-%o|01Uo zr!usye1Q?T_Xry@CFzmaLwFc6c=(#q7@tpTw#4P81opI<$*$<(HaE`-v(9H-4}dVU zBdvwNi`B+FgHm4DT79lvG8N3G;yRk3c_Y5}S(Q5HOwZ$2+lkp$G1U)%J_{KZF{KBd z8t-e=-9B^N&)-g@-wEm@h)&0iJhkd@7HK4#Kb_H z%N*HT=cWjXo z7?ay4YS|)ikSx@89R6%(02YmjQ_m=)CYF$haUD_eGNQwK7VHpgd`U+^n@ZQ4|fx%S(p3OqU*?P_5VNF5ocp0T@R z{bNGVHE&}f4K8@^13H650bzI<$|Fmko7Fgxq($QW zW@k#Ie4CJIXlL)>P!U59IuS4N)WLL+2u44>Q897a-K^@x6$w0;RFlQ*-UHGLreK{t4xw#- zQkYzPRnPK>))aAm?6r%A5!oj*E(_!-Q&Ef%qc>Ul6xRrw3-3{5PK5ph)*s$V9NIo< zDWqY>iGa3ytjr5>*OO<#c}E5sgNEL7A@}JlCChQuuCZA*_C&>;sEFl1m*rV*bVE>- zQr)aYZGt3Qf?hy82W-7Er%8rGVUoe8As(H!69k%y?;Uhg(n{v26k+<1v>W6syYj+% z>yw=4^BQGXCC+cdZ=5q64@W_!R9T5kdr_T|z7M*jS*0f4%Sm(aOf$UyaQH0B#j=aV zl#*pPKF87Oh?xWm? z4@)rv)PXD5+7`a)0Jq=QA5xg3@17rUqd$e@;fM%3*b8*WZ^u{2t}xF41rv5+6asD) zvBBWE&P(>UnOZsWrVK}izb)3kC-xd@1}r<7z`c!x|5O!Pno2~4qAs^LS~}#h1zn(D z>?1UfZW&*rG)o+$l}3sj>eGh}Zp>AAE*FzBPd7W4KXj|YcR=4Z*~PS!e~Q0CNPPXT zvMymnWFud4v5sn`jDSh){v=u86^^yEo=gh9oPk~pe_jC;^6Hq@FtKAjaXuINO z&&pL}>{y|*&psdC>W)0ePMzYg7hT7J^)rUadU12J$0cDqG}7~I*DlLVX=tDeZ}1#1 zVEg<982*ovO8AbY^e6D!d*|=i&^Ozsw91~J0?ZB>W;+Ug50LqLNA$W! 
zhOqxOl;ajqskgtSF$fJc+qD%=9@MUFS-rbU$|oQ#SiMW5G`p0<&BSbW4uT)=p*xbe zX_d^r`AW8~;;241*RLEsgMX(7hsg(Z`JX=1e2KMWTfll^a>Otq>_|!=hHFoizuUh~ zj;9(XYUXiu|L+tu^5^6K&g5`5FE=W&J0{-zpNYLanxXvLDqDt{n)0{z=)phBXOH_| zrmMs#C>`)m*I!4W>ey!qeRj3ARWz1Wdj$VBjS{`siTdl59cFgbejvc}3L*FG=?BJL zooCU1ToPWr`WoT;`6Ij7Z2o8JAB&)GUp2^1xgV-J)T{IS*P}lRrp-A1={oyA$A}7L zcl|%@=ied4#!nc1IzUV+mi6M*>SZcFmUwso(Ik{NA>th*fTiFislz+pczYBt@eE?k zd{hsZ{_e}($p2j2|Nntke)+nz5I!eXSLU21pKcJ@pX;-leNDq+2XHsr8JBc#ZJkiic3MJ2V;1F70GhihH z_-6+ShTnWk;}c|(lUrT7YBfx^TLQA8!tQ(=1zz*t&Y1q8fyBZ|tSQAd7U7Aq%JfHv zUVw%K1cFXT{;{}jURx8vlKLM?<@BvS`$wX=V_Rx0t0-a*Gm$GY#0=Zt3eUIgXSvC3 zx@Sy*d5?rjNMV79_Ai3M#swv4a!cA|8Uxjzzk|1NX8Fvb85%sJov9saYgcy{;r5M! zaEkcRaNa4QlybM~q9rLE3DS>>yBvIUrM-nLW>HoL+Zs2ZdsryNlB_i zid(wgu!7Ie)Z~@0`MVbh{G;dmXMP^_Rt;0qMt+IL9N0xtYX4=;m0v#Sdr$oiI2w_k z;t*B=!Dm?ijl99hw;ll-`S}pc5{bag#S#NqJ^lTp6UkyeJ+0#1m`8t?G*xh>S$cAA z7|M<;A$^i`8>k4Z9F19J+X<~Ql#2J?jV{2rj9lK4h_S60_R`Vt!LZU+4R8IN=+r~R zf_?OFd=Z0Id>O)F*XHtB7d7bS(4~$)5&Q~12uG;~3IF>(i8OhB($iUC8DuY`eni&uS5$0I3Qe!evx&si93 z<{qdiI!O9+Ve*v{+uLv>47XVQMDr zv^9{kFX_QEKeqg8+{c|wWxhSW#UA(fUYL+mV*R;U)zFOr;8g;y<#SmB%*KV-Z(FhC zFTmy?U$%MeNNzB^LBtQ+*$;t;5*z%kOTbe8YxRGrZ#^$Yd?SYC7l}ld>asGOsTCP^ z-+Xjbv5&}NHYuYfuJ}qh(0b$s7kQ}k=SRlLHHccHv)B&Yh=^q#|z z>brD$*oF6slz;+PiqECY)JKZ^Zt{kYBLi5xQ~fe~D39bj+{Ss)C3`I4Tfn0UFBp5$ z>ErK?%zsyJ_CF~!Q?YERC}z4Ly2W1=@!HHV+cZ{o%tSLIxnCgD+Xak?MRy!TW_A;9FxI-<+4`CMk#-pA4q+Kz3F}2K-MGR8KJ`%<}Ov^!3jw*+25)(e=j)A zS14q4H;WoCI6lI9wy!FB*hTGZRxCV0Gh!dh2nb&2&qo>I>|JMoz==oDO2*Rqjh{;m ze>|USYWg+n0f^>5JA^M^R-^bv7Qu*=N`Qr zhv)GcL@y$HBsP-e#G#RFDZ6bQp7MT0j=sgceqHN zswlq3iWym{geujt5&lpftd08X@|!iCBiQyAc=wUu1Sr!y!KlGgtkK$nE-Ac~TOhIi ze^NZjn*~OJi|v)W8+C?8=Tbj%31J8liN%FVqu%i~goCm^gG%4iTb*Oj5FLMaY~S!( zc@z^Lk964?bFg(zPtw?Gmjc2dx+M)!P_Y`?H|Yo+|LM>`yU@h2eM+z+OAVq<;LjG% zsZ%PJt$$hp|6-9A*%f2=CsqAmkAaK`mL^?jV4Y>ZZq@y`kq7wivi@1hEhm;*!Ba{p z=H>h}rmU4DdBZ8WF7-uAN(s$FU=qE{N1UwzA0u1GTWpQj|2ZZlP}0%*#n@Nsu9%*2 zdrM6$>5Iz$d$QtsTiuG9Kj-DJEFN|xK^^Grzj=u;xVbx31B)@m{`i$LFE`~c`92%H 
zS8tp`dd2Q6tcDR_c>wMG+a$3^?Tiy}TM%1W2L7julz&ChcB4^jQutv;ac`52#Y13R z$3xY_0RMj^{@+%1xc^)O*sp(ud>F;=UY&oHq<{Xye#HHIPyF8p{_j5gLn8khdN_yb z%_8OwiMs+dmJjZD?V${={!(~iA@g4v`<%ACG7v`{ZpUly~0Et8r zu5LQSJs3`op41gySYQ@kdc$Rnu$WAOPkN8OtU3io933}_gwmAp8!k}KuvFuIX>;3Pkiw||F%andG&eYL~F6w%|KdLUn8|hyw z+qQ#aJ)%2EUTO|%BkQV)dv>k}xe+D7!)w=&a3(okA);@+gdOh~0=vX-n#)wD=Jj<@$MzijV$G{z2Gy{|<*RK(Mg zHNsZTYXXZ^y-{ynF0__#F%VvBJ%OHf6%s0Nf#!~cX$NA}OCt_3g#xFp>wUD43$!m^ z^GU)|L*O#LR;eE|2IUf|>2nj%&a}DsbRSctVnHB+?y^x9_r~wOF$vrGwi9thZHSNNslE*4t#X zpE#Q?K+T=JvgB1t_mNcFNkxof#bRMaDr%l7ORc!gr;8VR&>GhNNNri1M_Tc!5iWPP zh^KN4sE4EHx@*4Nv#HyD)OuEoj8Pz#E?=Bm#LS$lF|dAr=Qv~SJ&-wc1~fp zsmX;NK89}k%j&|sYkkFzHVr{J?6VXTZB_@)$=*;fCEEEnb%|O@+tV)AcaIdJueVU0 zN9eU0rpT3B1F63FoA|-!uPBFy0n;^j&Ck4C57eRG50(v_2n$SKcbn4Go7Vy^Z((?4 z4-XWojoK{+yv~+4TS@0YJuPQ{EVj@~4iIB8YGuB&hKkbMQ^xq4)JS2eX(5`9!3%uE z@dNq5(`&r0r1c*%+%nDgx7N7W=VAv7-#%tE)Q-|KZ_o99*8zQ4CJ*1tbIXE;aE`Hbdt}YtiQ+$;G)_nX&;~6IXYZ$B; zxhyNkPL%E}eUb@{Lp1XWvhq6bAr4myUGpo#-5w?7LvA&xsRuf)etB+NRb8kaExS_l zs#ntqjIgu_y*7g@_~~3NEZhmZ<-%~}El~vQH4&=bj(h5fT}R)~G;6`Tbe5=n&~)## zcD=1|y272MZg>Y#F`caHCdYjWAco~Q^kLpKhoCOGO=<5r1zi1PZ1LktP8g-Rm(QtK zwisHKz^Em^)N)o`1Q|CTUZ~AOOoE-(p{4g$?dH3RZs2%z@}k{>2voZ13(=#hWd8ob zMxRt~wJU-<8@UGB6=X5nJ4MM&wWmdr!Ev9e!#6~qa(9|1G=oXUbKX9ZYab4d{@8%N z>~fX8kkMnFNdO(8lz$0w98CZ6jlRA0y1735t3Lg!#0`hQO#M(@-*VI!2R|k3kaWK+ zxfL+vRx%@giA7Ossb)JGbhK8EemSv_zD3*L=!2~>7tl;}=x2@y- zy+056zE5_oW0ucJVf{L_zCy75pd|&LoLkt%tvMs(&_peW8|_4W6O&5^aB1SEvs9v! 
zmM1j!BqD>FpU3Vh#63c<4q2~ja`?>G>z6GcioOJdu=LwGIVnQPu&O4@W#dxz@7%g2 zzLS7$Tuh*x`K)tgKC zUy2Dc0vKX-ItQ8Ldp!YwD6N!?svm7Uuo4rzoYht^~LPe5<`rhy@S&-_@X;=kndSUGqZi07oZ+-|3&iu6(x zKuoJ&dXs%Rou?o(2c94L$q3)8oL%&O7sNY&z-M1C8bpH6la^*A>-;Dp00~OaMO*NN z_OO2%zplga>d%7GV^;Fbp*)*Opf|@bp}>oQ*Ynz2I;ZE+!1`)0`G8aqasL6Y>*^Bw z0LyBy{T3~xbvGuggJ06*TyGZ!3Ot_Hy5dwTDn9{sRhMSACwEXEkvJt33=!=!9n;X7 z7N3^{UqArgrcc~Iz{={}KJR42OZ@g{1By~NRNjjP{{e)8d`J2}-@b3CxJq@7(HAUh z$LkskF3DT*v(2M_(J{P-1C$%8i-(NeIRCC7ac_UKd6)OK}fxJG1%T zcMPKEx$2moXX@jOrlZ>8o{YY4-f(o4o+FU(pd(J7-3)SbOe0F%lX%H(Oa#H=GzAX^bdd<~B zVO(OL-^|A#E~9{FVq3-rx&v*rcUvAx4<){`jv#1=63uB+ZX z%E$A-*PE}=;bjkdZRv`D<$RO=2xT1au{=#m3nHY>dR2k}*3JNe4ye4keV;lJ2i0x4 znhY!ZALU(%KUD4Cek7D7Bqj-uv{j9p_J3}G||v%TYae!utgdH;s@`~~N8u5;hl^}Vj^oEvsb%`eGhW35xksXR(< z@A7Fe<01qy>f7Qkz~Jns!e&`PHz_bNx_Q8OgFoS|LBq%Vp>@h`;SB+mT2(nI);KboXyfCg_DA>B9vx(~!Iq%rh*cZC1{FfJ${A%pZ$+|aRvO#A|UI2Fp zZI^hwWr#E7L}tH1P7fGyX$V@NW93U-c}O#yLRS{u=^>jZ)5}+6{W*MePSq%bd{+!- zh>GfSc_U(E%6GWuPsh$Blb+gHE2nkA7&RMVF|Yf!$rhqiQVbo3F7P9aMDs-4GX#4* z-;}y^4BhIGUZ?2xYgorN1?n~1o^uWxI`wkh48v$)l8~oA?#8}%(p6Ze^t zS{nU5nbrbh*B&HH4ApNMO=vxK?{mu6*^zA(^J`s9_-cyzs(AGBR$KbKW@r#_w5848 z*7b0QDOIuQlH(-&JuwabOZ~4=4!wXV7v&jRz?B&h=Ch8`><9?I$n5?521B&oo_n9z zE=)gCMXl=%?6P-0jI>|wy3Ksmmw$@*^j#Klca`@1>^JIstk0fE-)wD;^ssNS&>qqA z`H*FNQK$!^SMbZNh$)bLSJ_kM7iXAY7;FuGjcEWgaL62X%u(g)V1JYV>3=C>+pr}k z(Hfw;>(6;T1Wa?{n6b#`l=5W}X}d=jyac-zM95Bvb|32o#$#LIA~>@gw0;)V>c@)= zjJ0A`6&0;%wJ1(qui0llU_ZJ=!Q+&I+Xmw*yO1XNXn-Urv6}4Co08lV1MLPA8g1Xy zw@%hnY`1$$n4OW%I~cd0{D<$twjx+3!=jfHQL$Y*4^$F;HmGlHAc z{aa&Od>LNhKdnNy3g`B)5tdNo+xn@8sgU&SDj1u5`9(tBp+F<64Xb`sbG-9>viw(>jn3#BQ#MjYI;`AyKrEFrj7R95`gVcKBV;4$gl8eW-`qWVL za8ZH-W$&fwJ>wH|BkD1W#vi>}&$D<#L?$&7Rn5xh)+CnwM?c<{`)nTu{A@h)H%p(> zI-)*$3tHAbJL?kW82p?MKdH$oR%!kW;{K3Q>Mel(h(o{ciaS}#|5w+>mjBKkTj65( zqIe4uo$|sUiM~`tYcZ_p{adb(rYRE5)WVRy-=QGko- zvqS0RG@Jeo{w=bGN7RVBZY3)c(V!`kVCUHZ{ZY26q1vP=z}Thh?tHLrWThng_J&os zLVSiIPkaw{#dEIZ*SlUt@i{%fk&J1~_&F>UAlWnH?*uI)H#|B)D))kB7sgJrU(Xx; 
z60MjN+w0^_U${(OlIDo>SUsvYKK4l-f}eaGYV*JRe$hgFigRA*9&tx#FM=}x2CuHX;o z85U>Ckn!_9)%zkbGy);mg)<$W$sz!WJHT{2{8x?FY|j3R^6!%N`@kbh#xz`mJQx>k z={wK8=B|)16KPUBCr7g%(yw`+T2&e}ed(OE;|l|vG{I8EU%%enfyl*nSq6Fk)$k7@ z)IK-CA!G2SfCt>>KxKO);vCKkAl3d_p`>swV)44s({pR>ZkCFOWaWHCl~cGR$BC{c z6=IepPaw<6S1)cg)pTXgvv=PDeD+$$?`dNYb1idgVY;Tv6b%Dm}%Pbo0A1U`CqeH&sR&O6o?`t4!Y6@x6m5N?0Ee4`d| zC0hI5MLcGPh)|x8o&YG~k@6Q?ubme(Ue#otsx7cIx!1_dcIws@n3AH54U*6^Mghwr z_pW%J349Ho812$$x(-c#STFZtc`id8`dVe!tvPIrUE{zl7zah$e`JEZ)qm^HLSgf+ zn=?CR^w5SE*<;|NsdozW{FTlHLd#8RVIx4FMcRr!diLT8lO-k9Jr_B^JKG9A7tNAt>-uv} z8007jvea(OX?pZROS~`@8&{M~^k++waHBB$tA0`pPF=~1PTo%~s#*kQ-iH8`if_2HIHDC=~3G$|uYwi7dkXnJ4{K7wEVc_Tt^8+I6t73;u4e?P`#0q=oe zWk02w#-e?^_;@WL(ME%&9i!6z>sJhxo9mTqZ<&7G^#w>b^|CR7ANu%CAprA8dldRA zj%h}=a+zB!_eJZ_Z9$|PE2&1VsW$>ix**cW*6m84vI^%0tg>LIQUXb9T|odc2+Tpu zsjY3-In-K*Rz28zhn?RLM!m**R+Y&&8wi7&fBafEqd{k$1V0U$IKFK&N!Km+(#C7I z1hHH?cxkuw>ucbQ(n4COLtOeQ9`t*%uA*jQP=Y;6gSM6T`y)Jddnn?C|De)~mfore zw@~*<3w{-(eaA0#Qf@qp??M6V3n?ju(?;9MS`(F*IM9AG5J75^%emVHwU1b{+Myy= zqxZ9x$%h7M%XpqUIbJ5o&EjY?eq7-ai`3?yXS1V}qYji6u}j%azB4boQ%boeX| zq!e4wVI8JcWWPa`Xea=!>tczdW-0GU4NY7nVyYg5tclFm@Dhmp^k%P+AdPoVB%>{n z0AW|E6mZs}QA%p!Hm{Gg&1o%a;kHXCb~~;!vf}O>qcfJ@6SxdmD+^0#v6U-5H-0E9 z!b1XaO*;4|?E!^R%0a^f<>5)W<-V<6=G>L$lPyb^u53Qeo~k4@S1RP_bwMrDQWkwT zcgjgEH`oc=L%o9(=Q`h6Wo|@shTW&|TO4{y0Hej`FZ-d2xxl-66A<@+)Y-9l;$fSQ zQ1JYDD|m@nkHI&6KoB-n5F~TE11ENesa40t1;>~Dp3n5xt;#;u>u)eB<18k*S5S4-&kIa@XND{Iq9a{3?Z_ubc zfCVbL{D+~(xy6u9z!Ie(EIX-^_^ywlxwAK+Jds_ADeJ1a6H6=^5UH_(px7+)Smts0FG>Xk=e7HUq00Kn7bL>D*IxXdBj!3EYJ6!5@Nb7I-tQf6S{d^;AB{RiN zr_A<3U4JZ)YKBs<9WgAQ^E|hR??i;^na_G!d+tV7G}>b;kFfG$T2ZNCdriJS&vNZW zo#1$aozL)G4uiYe(n;;p@0M@S5~h9ndDz?$x=wVZfc-_y9S+IiP|8x~b??2U!K>ZQ z0^0Ru6S1e7Ab8Q`&rpZOR)^pM;I9@l?;*sIT{se!oD^#h6UI^yUg%y>mhxAAaep-u zZ@sjsHLN5^dnBT@Avu#`vAF~-J7OB4^U`JzSQd@(*1R@rJOlBcv|RAPfWuXv&)ZzC zxSVH239z~gT~jY38V@e$xi+6KlF+$?qr&!KL}fuHP6FHwzG(GS0(v(?!_MB>4?C4e%YrdZPcC z-(~311=a`|dt19E`a&aSKCq9Eq*<0L8igk*9O)~lbgm#t?Bi0IYrQi4Mf#}z>>E@f= 
zO?9BSZRiCiw{J$|8j%erIj;&N2p08n002Qq_v{X9`>t;=7vavG=l}YHMXEJ=lZ|g; zs`N-z6*mkiB!$5OApRlefJ7ZP+GoZPb@cWK1OSg$C?l|atHdA%+*XC`$xut8#vugL zw)(!*yFDni7c`%|lbGB!9K!n%V-EQjWZVvPQKCu{(t6!E9^5l^FI0)eIi-v>I5}%7 z%`3C0v9bEITCiJciEyTL|FW^B3jgqv?moppn=5aVcVLnJ8mNu6m8Stvu&>0^UPwl2 zl2S+GY>g1*yP84lGLF31LEz7t$&6Xd^Kg{C$Cv=~Kwo74M_;(c##0zx z2smHErPDMR@ZHItHd;1SZ3j(N4V=4mKc6PM#A@Wdmoak#$H0wu^|I|p&u(Rp{>!ga zW3P@@le`41fB*W4JuD+fhzVMyKqWb6@)B6zUl?k1 zfaS&0Y+DUr&q1 z8AMS6s`!a-y7drCO-RKlpWZNYlT@BdvG1)8bCN5`8N*}|?H=Hxs7;C`9tvhrtZb)N1VrF{T85DYDLR#n z!DH$m!`zJIrlik>tBi6-%yL#hh3LU>)$!Onx>Yh##p-ZEU1NOlY_ja<=c#gu3ra-Y z4ohFysh*1Ej90HUp4pC*w{LR4m?Pqr2kx?UcrP2K*BDP;C2blSC}Ri4_8eM=NUS-?OiDWRYID7J*>gB z(M50*i}IEq=yM9?3|H0LM>T?-I(X9BhG6y8e5avp-5rVIkCEitcYuB3VX$W^*uDV2 z??NxqR-T+Uo^revEyZh^Md$S`#F`T8e~z=$5hT-_P-Kn~)O?Ho)ekGJWF zhjElh4$7o0FGBfQxM%84wf$jZW{Ohsr4Tq@b3uoS49+xX2N^-Ax}*2y)`?8Z+$B5}I)AZNOG{={jfg-C#Asr<-2@2Ur7ZNsig)P#YLJG2-%^@UKv>6^By?fuQw zsYLMdlEV`YjHK{-@M1#*o#!Is66nf$y(*yxvwDQtkc`*&T(K&LvT7H<1N1DQozm9Xg@!R1x+OZ3&k_8LNT*+h zD@V3g63n>tHr+`669bAxy4>eo*Yta9~-@|=L$eO~F>Msf|9EbEQ zAOQwF)zRAYy}K{ZoE=W?9=iFXb^miupFqC-UL<8(gC%SG__)0TQrPuVH`F9K98<6A zOtAHNELZOSzu~aOqB}C^RCMewQ8Bmp8!oMkU2k(psrz=7^QqQc8U2vO0IMO1KT_S$ zYVOG{m>He1B+xZE4OEz|d~PSz)XGw_18=$0wC(NipsLo|A2RcyFKj}2&0$QCU-2Vi z-eW{PXaDQu-i&D->AB*LPlA<(P!k*bCsm%uzHDJ{b$*HKbtP#N5ob<|4OYhgk>)-1 zWm-Y`*K#KKsF$J zhxGfnd~5vNX4+-Vphp_V{PsCg#gy9pF)?Y2vy>hoUl7Fb9J|OAS~o!##Nn>x3X$#2 z$Is*+^k+=}t@@pK%!hi;J|$1(Je5#BkbA>#MDo&CdC2`zy1@0Mrg zUPb!7EM2HR5(DnA!%BF)rQX^R^xmBQ-Q;IZ_0OJntCI-6+OdR$Nwtp4$SgEIFx$}* zIy#GIIN1r)ZJdu(Yg+d?QwET;G-8C?|RdLfX G{(k@`1Jg|a literal 0 HcmV?d00001 diff --git a/Deformable-DETR/main.py b/Deformable-DETR/main.py new file mode 100644 index 0000000..fc6ccfa --- /dev/null +++ b/Deformable-DETR/main.py @@ -0,0 +1,326 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. 
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# ------------------------------------------------------------------------ + + +import argparse +import datetime +import json +import random +import time +from pathlib import Path + +import numpy as np +import torch +from torch.utils.data import DataLoader +import datasets +import util.misc as utils +import datasets.samplers as samplers +from datasets import build_dataset, get_coco_api_from_dataset +from engine import evaluate, train_one_epoch +from models import build_model + + +def get_args_parser(): + parser = argparse.ArgumentParser('Deformable DETR Detector', add_help=False) + parser.add_argument('--lr', default=2e-4, type=float) + parser.add_argument('--lr_backbone_names', default=["backbone.0"], type=str, nargs='+') + parser.add_argument('--lr_backbone', default=2e-5, type=float) + parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+') + parser.add_argument('--lr_linear_proj_mult', default=0.1, type=float) + parser.add_argument('--batch_size', default=2, type=int) + parser.add_argument('--weight_decay', default=1e-4, type=float) + parser.add_argument('--epochs', default=50, type=int) + parser.add_argument('--lr_drop', default=40, type=int) + parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+') + parser.add_argument('--clip_max_norm', default=0.1, type=float, + help='gradient clipping max norm') + + + parser.add_argument('--sgd', action='store_true') + + # Variants of Deformable DETR + parser.add_argument('--with_box_refine', default=False, action='store_true') + parser.add_argument('--two_stage', default=False, action='store_true') + + # Model parameters + 
parser.add_argument('--frozen_weights', type=str, default=None, + help="Path to the pretrained model. If set, only the mask head will be trained") + + # * Backbone + parser.add_argument('--backbone', default='resnet50', type=str, + help="Name of the convolutional backbone to use") + parser.add_argument('--dilation', action='store_true', + help="If true, we replace stride with dilation in the last convolutional block (DC5)") + parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), + help="Type of positional embedding to use on top of the image features") + parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float, + help="position / size * scale") + parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels') + + # * Transformer + parser.add_argument('--enc_layers', default=6, type=int, + help="Number of encoding layers in the transformer") + parser.add_argument('--dec_layers', default=6, type=int, + help="Number of decoding layers in the transformer") + parser.add_argument('--dim_feedforward', default=1024, type=int, + help="Intermediate size of the feedforward layers in the transformer blocks") + parser.add_argument('--hidden_dim', default=256, type=int, + help="Size of the embeddings (dimension of the transformer)") + parser.add_argument('--dropout', default=0.1, type=float, + help="Dropout applied in the transformer") + parser.add_argument('--nheads', default=8, type=int, + help="Number of attention heads inside the transformer's attentions") + parser.add_argument('--num_queries', default=300, type=int, + help="Number of query slots") + parser.add_argument('--dec_n_points', default=4, type=int) + parser.add_argument('--enc_n_points', default=4, type=int) + + # * Segmentation + parser.add_argument('--masks', action='store_true', + help="Train segmentation head if the flag is provided") + + # Loss + parser.add_argument('--no_aux_loss', dest='aux_loss', 
action='store_false', + help="Disables auxiliary decoding losses (loss at each layer)") + + # * Matcher + parser.add_argument('--set_cost_class', default=2, type=float, + help="Class coefficient in the matching cost") + parser.add_argument('--set_cost_bbox', default=5, type=float, + help="L1 box coefficient in the matching cost") + parser.add_argument('--set_cost_giou', default=2, type=float, + help="giou box coefficient in the matching cost") + + # * Loss coefficients + parser.add_argument('--mask_loss_coef', default=1, type=float) + parser.add_argument('--dice_loss_coef', default=1, type=float) + parser.add_argument('--cls_loss_coef', default=2, type=float) + parser.add_argument('--bbox_loss_coef', default=5, type=float) + parser.add_argument('--giou_loss_coef', default=2, type=float) + parser.add_argument('--focal_alpha', default=0.25, type=float) + + # dataset parameters + parser.add_argument('--dataset_file', default='coco') + parser.add_argument('--coco_path', default='./data/coco', type=str) + parser.add_argument('--coco_panoptic_path', type=str) + parser.add_argument('--remove_difficult', action='store_true') + + parser.add_argument('--output_dir', default='', + help='path where to save, empty for no saving') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=42, type=int) + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument('--eval', action='store_true') + parser.add_argument('--num_workers', default=2, type=int) + parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory') + + return parser + + +def main(args): + utils.init_distributed_mode(args) + print("git:\n {}\n".format(utils.get_sha())) + + if args.frozen_weights is not None: + assert args.masks, "Frozen training is meant 
for segmentation only" + print(args) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + utils.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + random.seed(seed) + + model, criterion, postprocessors = build_model(args) + model.to(device) + + model_without_ddp = model + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + print('number of params:', n_parameters) + + dataset_train = build_dataset(image_set='train', args=args) + dataset_val = build_dataset(image_set='val', args=args) + + if args.distributed: + if args.cache_mode: + sampler_train = samplers.NodeDistributedSampler(dataset_train) + sampler_val = samplers.NodeDistributedSampler(dataset_val, shuffle=False) + else: + sampler_train = samplers.DistributedSampler(dataset_train) + sampler_val = samplers.DistributedSampler(dataset_val, shuffle=False) + else: + sampler_train = torch.utils.data.RandomSampler(dataset_train) + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + + batch_sampler_train = torch.utils.data.BatchSampler( + sampler_train, args.batch_size, drop_last=True) + + data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, + collate_fn=utils.collate_fn, num_workers=args.num_workers, + pin_memory=True) + data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val, + drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers, + pin_memory=True) + + # lr_backbone_names = ["backbone.0", "backbone.neck", "input_proj", "transformer.encoder"] + def match_name_keywords(n, name_keywords): + out = False + for b in name_keywords: + if b in n: + out = True + break + return out + + for n, p in model_without_ddp.named_parameters(): + print(n) + + param_dicts = [ + { + "params": + [p for n, p in model_without_ddp.named_parameters() + if not match_name_keywords(n, args.lr_backbone_names) and not match_name_keywords(n, args.lr_linear_proj_names) and 
p.requires_grad], + "lr": args.lr, + }, + { + "params": [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_backbone_names) and p.requires_grad], + "lr": args.lr_backbone, + }, + { + "params": [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad], + "lr": args.lr * args.lr_linear_proj_mult, + } + ] + if args.sgd: + optimizer = torch.optim.SGD(param_dicts, lr=args.lr, momentum=0.9, + weight_decay=args.weight_decay) + else: + optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, + weight_decay=args.weight_decay) + lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop) + + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + model_without_ddp = model.module + + if args.dataset_file == "coco_panoptic": + # We also evaluate AP during panoptic training, on original coco DS + coco_val = datasets.coco.build("val", args) + base_ds = get_coco_api_from_dataset(coco_val) + else: + base_ds = get_coco_api_from_dataset(dataset_val) + + if args.frozen_weights is not None: + checkpoint = torch.load(args.frozen_weights, map_location='cpu') + model_without_ddp.detr.load_state_dict(checkpoint['model']) + + output_dir = Path(args.output_dir) + if args.resume: + if args.resume.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.resume, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.resume, map_location='cpu') + missing_keys, unexpected_keys = model_without_ddp.load_state_dict(checkpoint['model'], strict=False) + unexpected_keys = [k for k in unexpected_keys if not (k.endswith('total_params') or k.endswith('total_ops'))] + if len(missing_keys) > 0: + print('Missing Keys: {}'.format(missing_keys)) + if len(unexpected_keys) > 0: + print('Unexpected Keys: {}'.format(unexpected_keys)) + if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in 
checkpoint and 'epoch' in checkpoint: + import copy + p_groups = copy.deepcopy(optimizer.param_groups) + optimizer.load_state_dict(checkpoint['optimizer']) + for pg, pg_old in zip(optimizer.param_groups, p_groups): + pg['lr'] = pg_old['lr'] + pg['initial_lr'] = pg_old['initial_lr'] + print(optimizer.param_groups) + lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) + # todo: this is a hack for doing experiment that resume from checkpoint and also modify lr scheduler (e.g., decrease lr in advance). + args.override_resumed_lr_drop = True + if args.override_resumed_lr_drop: + print('Warning: (hack) args.override_resumed_lr_drop is set to True, so args.lr_drop would override lr_drop in resumed lr_scheduler.') + lr_scheduler.step_size = args.lr_drop + lr_scheduler.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups)) + lr_scheduler.step(lr_scheduler.last_epoch) + args.start_epoch = checkpoint['epoch'] + 1 + # check the resumed model + if not args.eval: + test_stats, coco_evaluator = evaluate( + model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir + ) + + if args.eval: + test_stats, coco_evaluator = evaluate(model, criterion, postprocessors, + data_loader_val, base_ds, device, args.output_dir) + if args.output_dir: + utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth") + return + + print("Start training") + start_time = time.time() + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + sampler_train.set_epoch(epoch) + train_stats = train_one_epoch( + model, criterion, data_loader_train, optimizer, device, epoch, args.clip_max_norm) + lr_scheduler.step() + if args.output_dir: + checkpoint_paths = [output_dir / 'checkpoint.pth'] + # extra checkpoint before LR drop and every 5 epochs + if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 5 == 0: + checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth') + for checkpoint_path in checkpoint_paths: + 
utils.save_on_master({ + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'lr_scheduler': lr_scheduler.state_dict(), + 'epoch': epoch, + 'args': args, + }, checkpoint_path) + + test_stats, coco_evaluator = evaluate( + model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir + ) + + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + **{f'test_{k}': v for k, v in test_stats.items()}, + 'epoch': epoch, + 'n_parameters': n_parameters} + + if args.output_dir and utils.is_main_process(): + with (output_dir / "log.txt").open("a") as f: + f.write(json.dumps(log_stats) + "\n") + + # for evaluation logs + if coco_evaluator is not None: + (output_dir / 'eval').mkdir(exist_ok=True) + if "bbox" in coco_evaluator.coco_eval: + filenames = ['latest.pth'] + if epoch % 50 == 0: + filenames.append(f'{epoch:03}.pth') + for name in filenames: + torch.save(coco_evaluator.coco_eval["bbox"].eval, + output_dir / "eval" / name) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('Deformable DETR training and evaluation script', parents=[get_args_parser()]) + args = parser.parse_args() + if args.output_dir: + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + main(args) diff --git a/Deformable-DETR/models/__init__.py b/Deformable-DETR/models/__init__.py new file mode 100644 index 0000000..9a59c33 --- /dev/null +++ b/Deformable-DETR/models/__init__.py @@ -0,0 +1,15 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. 
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# ------------------------------------------------------------------------ + +from .deformable_detr import build + + +def build_model(args): + return build(args) + diff --git a/Deformable-DETR/models/backbone.py b/Deformable-DETR/models/backbone.py new file mode 100644 index 0000000..4bfe705 --- /dev/null +++ b/Deformable-DETR/models/backbone.py @@ -0,0 +1,138 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# ------------------------------------------------------------------------ + +""" +Backbone modules. +""" +from collections import OrderedDict + +import torch +import torch.nn.functional as F +import torchvision +from torch import nn +from torchvision.models._utils import IntermediateLayerGetter +from typing import Dict, List + +from util.misc import NestedTensor, is_main_process + +from .position_encoding import build_position_encoding + + +class FrozenBatchNorm2d(torch.nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + Copy-paste from torchvision.misc.ops with added eps before rqsrt, + without which any other models than torchvision.models.resnet[18,34,50,101] + produce nans. 
+ """ + + def __init__(self, n, eps=1e-5): + super(FrozenBatchNorm2d, self).__init__() + self.register_buffer("weight", torch.ones(n)) + self.register_buffer("bias", torch.zeros(n)) + self.register_buffer("running_mean", torch.zeros(n)) + self.register_buffer("running_var", torch.ones(n)) + self.eps = eps + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + num_batches_tracked_key = prefix + 'num_batches_tracked' + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + + super(FrozenBatchNorm2d, self)._load_from_state_dict( + state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs) + + def forward(self, x): + # move reshapes to the beginning + # to make it fuser-friendly + w = self.weight.reshape(1, -1, 1, 1) + b = self.bias.reshape(1, -1, 1, 1) + rv = self.running_var.reshape(1, -1, 1, 1) + rm = self.running_mean.reshape(1, -1, 1, 1) + eps = self.eps + scale = w * (rv + eps).rsqrt() + bias = b - rm * scale + return x * scale + bias + + +class BackboneBase(nn.Module): + + def __init__(self, backbone: nn.Module, train_backbone: bool, return_interm_layers: bool): + super().__init__() + for name, parameter in backbone.named_parameters(): + if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: + parameter.requires_grad_(False) + if return_interm_layers: + # return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} + return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"} + self.strides = [8, 16, 32] + self.num_channels = [512, 1024, 2048] + else: + return_layers = {'layer4': "0"} + self.strides = [32] + self.num_channels = [2048] + self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) + + def forward(self, tensor_list: NestedTensor): + xs = self.body(tensor_list.tensors) + out: Dict[str, NestedTensor] = {} + for name, x in xs.items(): + m = 
tensor_list.mask + assert m is not None + mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] + out[name] = NestedTensor(x, mask) + return out + + +class Backbone(BackboneBase): + """ResNet backbone with frozen BatchNorm.""" + def __init__(self, name: str, + train_backbone: bool, + return_interm_layers: bool, + dilation: bool): + norm_layer = FrozenBatchNorm2d + backbone = getattr(torchvision.models, name)( + replace_stride_with_dilation=[False, False, dilation], + pretrained=is_main_process(), norm_layer=norm_layer) + assert name not in ('resnet18', 'resnet34'), "number of channels are hard coded" + super().__init__(backbone, train_backbone, return_interm_layers) + if dilation: + self.strides[-1] = self.strides[-1] // 2 + + +class Joiner(nn.Sequential): + def __init__(self, backbone, position_embedding): + super().__init__(backbone, position_embedding) + self.strides = backbone.strides + self.num_channels = backbone.num_channels + + def forward(self, tensor_list: NestedTensor): + xs = self[0](tensor_list) + out: List[NestedTensor] = [] + pos = [] + for name, x in sorted(xs.items()): + out.append(x) + + # position encoding + for x in out: + pos.append(self[1](x).to(x.tensors.dtype)) + + return out, pos + + +def build_backbone(args): + position_embedding = build_position_encoding(args) + train_backbone = args.lr_backbone > 0 + return_interm_layers = args.masks or (args.num_feature_levels > 1) + backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) + model = Joiner(backbone, position_embedding) + return model diff --git a/Deformable-DETR/models/deformable_detr.py b/Deformable-DETR/models/deformable_detr.py new file mode 100644 index 0000000..f1415e8 --- /dev/null +++ b/Deformable-DETR/models/deformable_detr.py @@ -0,0 +1,492 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. 
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# ------------------------------------------------------------------------ + +""" +Deformable DETR model and criterion classes. +""" +import torch +import torch.nn.functional as F +from torch import nn +import math + +from util import box_ops +from util.misc import (NestedTensor, nested_tensor_from_tensor_list, + accuracy, get_world_size, interpolate, + is_dist_avail_and_initialized, inverse_sigmoid) + +from .backbone import build_backbone +from .matcher import build_matcher +from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, + dice_loss, sigmoid_focal_loss) +from .deformable_transformer import build_deforamble_transformer +import copy + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +class DeformableDETR(nn.Module): + """ This is the Deformable DETR module that performs object detection """ + def __init__(self, backbone, transformer, num_classes, num_queries, num_feature_levels, + aux_loss=True, with_box_refine=False, two_stage=False): + """ Initializes the model. + Parameters: + backbone: torch module of the backbone to be used. See backbone.py + transformer: torch module of the transformer architecture. See transformer.py + num_classes: number of object classes + num_queries: number of object queries, ie detection slot. This is the maximal number of objects + DETR can detect in a single image. For COCO, we recommend 100 queries. + aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
+ with_box_refine: iterative bounding box refinement + two_stage: two-stage Deformable DETR + """ + super().__init__() + self.num_queries = num_queries + self.transformer = transformer + hidden_dim = transformer.d_model + self.class_embed = nn.Linear(hidden_dim, num_classes) + self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) + self.num_feature_levels = num_feature_levels + if not two_stage: + self.query_embed = nn.Embedding(num_queries, hidden_dim*2) + if num_feature_levels > 1: + num_backbone_outs = len(backbone.strides) + input_proj_list = [] + for _ in range(num_backbone_outs): + in_channels = backbone.num_channels[_] + input_proj_list.append(nn.Sequential( + nn.Conv2d(in_channels, hidden_dim, kernel_size=1), + nn.GroupNorm(32, hidden_dim), + )) + for _ in range(num_feature_levels - num_backbone_outs): + input_proj_list.append(nn.Sequential( + nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), + nn.GroupNorm(32, hidden_dim), + )) + in_channels = hidden_dim + self.input_proj = nn.ModuleList(input_proj_list) + else: + self.input_proj = nn.ModuleList([ + nn.Sequential( + nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1), + nn.GroupNorm(32, hidden_dim), + )]) + self.backbone = backbone + self.aux_loss = aux_loss + self.with_box_refine = with_box_refine + self.two_stage = two_stage + + prior_prob = 0.01 + bias_value = -math.log((1 - prior_prob) / prior_prob) + self.class_embed.bias.data = torch.ones(num_classes) * bias_value + nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0) + nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0) + for proj in self.input_proj: + nn.init.xavier_uniform_(proj[0].weight, gain=1) + nn.init.constant_(proj[0].bias, 0) + + # if two-stage, the last class_embed and bbox_embed is for region proposal generation + num_pred = (transformer.decoder.num_layers + 1) if two_stage else transformer.decoder.num_layers + if with_box_refine: + self.class_embed = _get_clones(self.class_embed, num_pred) + 
self.bbox_embed = _get_clones(self.bbox_embed, num_pred) + nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0) + # hack implementation for iterative bounding box refinement + self.transformer.decoder.bbox_embed = self.bbox_embed + else: + nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0) + self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)]) + self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)]) + self.transformer.decoder.bbox_embed = None + if two_stage: + # hack implementation for two-stage + self.transformer.decoder.class_embed = self.class_embed + for box_embed in self.bbox_embed: + nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0) + + def forward(self, samples: NestedTensor): + """ The forward expects a NestedTensor, which consists of: + - samples.tensor: batched images, of shape [batch_size x 3 x H x W] + - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels + + It returns a dict with the following elements: + - "pred_logits": the classification logits (including no-object) for all queries. + Shape= [batch_size x num_queries x (num_classes + 1)] + - "pred_boxes": The normalized boxes coordinates for all queries, represented as + (center_x, center_y, height, width). These values are normalized in [0, 1], + relative to the size of each individual image (disregarding possible padding). + See PostProcess for information on how to retrieve the unnormalized bounding box. + - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of + dictionnaries containing the two above keys for each decoder layer. 
+ """ + if not isinstance(samples, NestedTensor): + samples = nested_tensor_from_tensor_list(samples) + features, pos = self.backbone(samples) + + srcs = [] + masks = [] + for l, feat in enumerate(features): + src, mask = feat.decompose() + srcs.append(self.input_proj[l](src)) + masks.append(mask) + assert mask is not None + if self.num_feature_levels > len(srcs): + _len_srcs = len(srcs) + for l in range(_len_srcs, self.num_feature_levels): + if l == _len_srcs: + src = self.input_proj[l](features[-1].tensors) + else: + src = self.input_proj[l](srcs[-1]) + m = samples.mask + mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] + pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) + srcs.append(src) + masks.append(mask) + pos.append(pos_l) + + query_embeds = None + if not self.two_stage: + query_embeds = self.query_embed.weight + hs, init_reference, inter_references, enc_outputs_class, enc_outputs_coord_unact = self.transformer(srcs, masks, pos, query_embeds) + + outputs_classes = [] + outputs_coords = [] + for lvl in range(hs.shape[0]): + if lvl == 0: + reference = init_reference + else: + reference = inter_references[lvl - 1] + reference = inverse_sigmoid(reference) + outputs_class = self.class_embed[lvl](hs[lvl]) + tmp = self.bbox_embed[lvl](hs[lvl]) + if reference.shape[-1] == 4: + tmp += reference + else: + assert reference.shape[-1] == 2 + tmp[..., :2] += reference + outputs_coord = tmp.sigmoid() + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + outputs_class = torch.stack(outputs_classes) + outputs_coord = torch.stack(outputs_coords) + + out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]} + if self.aux_loss: + out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord) + + if self.two_stage: + enc_outputs_coord = enc_outputs_coord_unact.sigmoid() + out['enc_outputs'] = {'pred_logits': enc_outputs_class, 'pred_boxes': enc_outputs_coord} + return out + + 
@torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_coord): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. + return [{'pred_logits': a, 'pred_boxes': b} + for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] + + +class SetCriterion(nn.Module): + """ This class computes the loss for DETR. + The process happens in two steps: + 1) we compute hungarian assignment between ground truth boxes and the outputs of the model + 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) + """ + def __init__(self, num_classes, matcher, weight_dict, losses, focal_alpha=0.25): + """ Create the criterion. + Parameters: + num_classes: number of object categories, omitting the special no-object category + matcher: module able to compute a matching between targets and proposals + weight_dict: dict containing as key the names of the losses and as values their relative weight. + losses: list of all the losses to be applied. See get_loss for list of available losses. 
+ focal_alpha: alpha in Focal Loss + """ + super().__init__() + self.num_classes = num_classes + self.matcher = matcher + self.weight_dict = weight_dict + self.losses = losses + self.focal_alpha = focal_alpha + + def loss_labels(self, outputs, targets, indices, num_boxes, log=True): + """Classification loss (NLL) + targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] + """ + assert 'pred_logits' in outputs + src_logits = outputs['pred_logits'] + + idx = self._get_src_permutation_idx(indices) + target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) + target_classes = torch.full(src_logits.shape[:2], self.num_classes, + dtype=torch.int64, device=src_logits.device) + target_classes[idx] = target_classes_o + + target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1], + dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) + target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) + + target_classes_onehot = target_classes_onehot[:,:,:-1] + loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] + losses = {'loss_ce': loss_ce} + + if log: + # TODO this should probably be a separate loss, not hacked in this one here + losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] + return losses + + @torch.no_grad() + def loss_cardinality(self, outputs, targets, indices, num_boxes): + """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes + This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients + """ + pred_logits = outputs['pred_logits'] + device = pred_logits.device + tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) + # Count the number of predictions that are NOT "no-object" (which is the last class) + card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) + card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) + losses = {'cardinality_error': card_err} + return losses + + def loss_boxes(self, outputs, targets, indices, num_boxes): + """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss + targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] + The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size. + """ + assert 'pred_boxes' in outputs + idx = self._get_src_permutation_idx(indices) + src_boxes = outputs['pred_boxes'][idx] + target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) + + loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') + + losses = {} + losses['loss_bbox'] = loss_bbox.sum() / num_boxes + + loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( + box_ops.box_cxcywh_to_xyxy(src_boxes), + box_ops.box_cxcywh_to_xyxy(target_boxes))) + losses['loss_giou'] = loss_giou.sum() / num_boxes + return losses + + def loss_masks(self, outputs, targets, indices, num_boxes): + """Compute the losses related to the masks: the focal loss and the dice loss. 
+           targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
+        """
+        assert "pred_masks" in outputs
+
+        src_idx = self._get_src_permutation_idx(indices)
+        tgt_idx = self._get_tgt_permutation_idx(indices)
+
+        src_masks = outputs["pred_masks"]
+
+        # TODO use valid to mask invalid areas due to padding in loss
+        target_masks, valid = nested_tensor_from_tensor_list([t["masks"] for t in targets]).decompose()
+        target_masks = target_masks.to(src_masks)
+
+        src_masks = src_masks[src_idx]
+        # upsample predictions to the target size
+        src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
+                                mode="bilinear", align_corners=False)
+        src_masks = src_masks[:, 0].flatten(1)
+
+        target_masks = target_masks[tgt_idx].flatten(1)
+
+        losses = {
+            "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
+            "loss_dice": dice_loss(src_masks, target_masks, num_boxes),
+        }
+        return losses
+
+    def _get_src_permutation_idx(self, indices):
+        # permute predictions following indices
+        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
+        src_idx = torch.cat([src for (src, _) in indices])
+        return batch_idx, src_idx
+
+    def _get_tgt_permutation_idx(self, indices):
+        # permute targets following indices
+        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
+        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
+        return batch_idx, tgt_idx
+
+    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
+        loss_map = {
+            'labels': self.loss_labels,
+            'cardinality': self.loss_cardinality,
+            'boxes': self.loss_boxes,
+            'masks': self.loss_masks
+        }
+        assert loss in loss_map, f'do you really want to compute {loss} loss?'
+        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
+
+    def forward(self, outputs, targets):
+        """ This performs the loss computation.
+        Parameters:
+             outputs: dict of tensors, see the output specification of the model for the format
+             targets: list of dicts, such that len(targets) == batch_size.
+                      The expected keys in each dict depends on the losses applied, see each loss' doc
+        """
+        outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs' and k != 'enc_outputs'}
+
+        # Retrieve the matching between the outputs of the last layer and the targets
+        indices = self.matcher(outputs_without_aux, targets)
+
+        # Compute the average number of target boxes accross all nodes, for normalization purposes
+        num_boxes = sum(len(t["labels"]) for t in targets)
+        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
+        if is_dist_avail_and_initialized():
+            torch.distributed.all_reduce(num_boxes)
+        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
+
+        # Compute all the requested losses
+        losses = {}
+        for loss in self.losses:
+            kwargs = {}
+            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes, **kwargs))
+
+        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
+        if 'aux_outputs' in outputs:
+            for i, aux_outputs in enumerate(outputs['aux_outputs']):
+                indices = self.matcher(aux_outputs, targets)
+                for loss in self.losses:
+                    if loss == 'masks':
+                        # Intermediate masks losses are too costly to compute, we ignore them.
+                        continue
+                    kwargs = {}
+                    if loss == 'labels':
+                        # Logging is enabled only for the last layer
+                        kwargs['log'] = False
+                    l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
+                    l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
+                    losses.update(l_dict)
+
+        if 'enc_outputs' in outputs:
+            enc_outputs = outputs['enc_outputs']
+            bin_targets = copy.deepcopy(targets)
+            for bt in bin_targets:
+                bt['labels'] = torch.zeros_like(bt['labels'])
+            indices = self.matcher(enc_outputs, bin_targets)
+            for loss in self.losses:
+                if loss == 'masks':
+                    # Intermediate masks losses are too costly to compute, we ignore them.
+                    continue
+                kwargs = {}
+                if loss == 'labels':
+                    # Logging is enabled only for the last layer
+                    kwargs['log'] = False
+                l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs)
+                l_dict = {k + f'_enc': v for k, v in l_dict.items()}
+                losses.update(l_dict)
+
+        return losses
+
+
+class PostProcess(nn.Module):
+    """ This module converts the model's output into the format expected by the coco api"""
+
+    @torch.no_grad()
+    def forward(self, outputs, target_sizes):
+        """ Perform the computation
+        Parameters:
+            outputs: raw outputs of the model
+            target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
+                          For evaluation, this must be the original image size (before any data augmentation)
+                          For visualization, this should be the image size after data augment, but before padding
+        """
+        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
+
+        assert len(out_logits) == len(target_sizes)
+        assert target_sizes.shape[1] == 2
+
+        prob = out_logits.sigmoid()
+        topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
+        scores = topk_values
+        topk_boxes = topk_indexes // out_logits.shape[2]
+        labels = topk_indexes % out_logits.shape[2]
+        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
+        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4))
+
+        # and from relative [0, 1] to absolute [0, height] coordinates
+        img_h, img_w = target_sizes.unbind(1)
+        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
+        boxes = boxes * scale_fct[:, None, :]
+
+        results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
+
+        return results
+
+
+class MLP(nn.Module):
+    """ Very simple multi-layer perceptron (also called FFN)"""
+
+    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
+        super().__init__()
+        self.num_layers = num_layers
+        h = [hidden_dim] * (num_layers - 1)
+        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+
+    def forward(self, x):
+        for i, layer in enumerate(self.layers):
+            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
+        return x
+
+
+def build(args):
+    num_classes = 20 if args.dataset_file != 'coco' else 91
+    if args.dataset_file == "coco_panoptic":
+        num_classes = 250
+    device = torch.device(args.device)
+
+    backbone = build_backbone(args)
+
+    transformer = build_deforamble_transformer(args)
+    model = DeformableDETR(
+        backbone,
+        transformer,
+        num_classes=num_classes,
+        num_queries=args.num_queries,
+        num_feature_levels=args.num_feature_levels,
+        aux_loss=args.aux_loss,
+        with_box_refine=args.with_box_refine,
+        two_stage=args.two_stage,
+    )
+    if args.masks:
+        model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
+    matcher = build_matcher(args)
+    weight_dict = {'loss_ce': args.cls_loss_coef, 'loss_bbox': args.bbox_loss_coef}
+    weight_dict['loss_giou'] = args.giou_loss_coef
+    if args.masks:
+        weight_dict["loss_mask"] = args.mask_loss_coef
+        weight_dict["loss_dice"] = args.dice_loss_coef
+    # TODO this is a hack
+    if args.aux_loss:
+        aux_weight_dict = {}
+        for i in range(args.dec_layers - 1):
+            aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
+        aux_weight_dict.update({k + f'_enc': v for k, v in weight_dict.items()})
+        weight_dict.update(aux_weight_dict)
+
+    losses = ['labels', 'boxes', 'cardinality']
+    if args.masks:
+        losses += ["masks"]
+    # num_classes, matcher, weight_dict, losses, focal_alpha=0.25
+    criterion = SetCriterion(num_classes, matcher, weight_dict, losses, focal_alpha=args.focal_alpha)
+    criterion.to(device)
+    postprocessors = {'bbox': PostProcess()}
+    if args.masks:
+        postprocessors['segm'] = PostProcessSegm()
+        if args.dataset_file == "coco_panoptic":
+            is_thing_map = {i: i <= 90 for i in range(201)}
+            postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85)
+
+    return model, criterion, postprocessors
diff --git a/Deformable-DETR/models/deformable_transformer.py b/Deformable-DETR/models/deformable_transformer.py
new file mode 100644
index 0000000..08ca377
--- /dev/null
+++ b/Deformable-DETR/models/deformable_transformer.py
@@ -0,0 +1,394 @@
+# ------------------------------------------------------------------------
+# Deformable DETR
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Modified from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+# ------------------------------------------------------------------------
+
+import copy
+from typing import Optional, List
+import math
+
+import torch
+import torch.nn.functional as F
+from torch import nn, Tensor
+from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
+
+from util.misc import inverse_sigmoid
+from models.ops.modules import MSDeformAttn
+
+
+class DeformableTransformer(nn.Module):
+    def __init__(self, d_model=256, nhead=8,
+                 num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.1,
+                 activation="relu", return_intermediate_dec=False,
+                 num_feature_levels=4, dec_n_points=4, enc_n_points=4,
+                 two_stage=False, two_stage_num_proposals=300):
+        super().__init__()
+
+        self.d_model = d_model
+        self.nhead = nhead
+        self.two_stage = two_stage
+        self.two_stage_num_proposals = two_stage_num_proposals
+
+        encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward,
+                                                          dropout, activation,
+                                                          num_feature_levels, nhead, enc_n_points)
+        self.encoder = DeformableTransformerEncoder(encoder_layer, num_encoder_layers)
+
+        decoder_layer = DeformableTransformerDecoderLayer(d_model, dim_feedforward,
+                                                          dropout, activation,
+                                                          num_feature_levels, nhead, dec_n_points)
+        self.decoder = DeformableTransformerDecoder(decoder_layer, num_decoder_layers, return_intermediate_dec)
+
+        self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
+
+        if two_stage:
+            self.enc_output = nn.Linear(d_model, d_model)
+            self.enc_output_norm = nn.LayerNorm(d_model)
+            self.pos_trans = nn.Linear(d_model * 2, d_model * 2)
+            self.pos_trans_norm = nn.LayerNorm(d_model * 2)
+        else:
+            self.reference_points = nn.Linear(d_model, 2)
+
+        self._reset_parameters()
+
+    def _reset_parameters(self):
+        for p in self.parameters():
+            if p.dim() > 1:
+                nn.init.xavier_uniform_(p)
+        for m in self.modules():
+            if isinstance(m, MSDeformAttn):
+                m._reset_parameters()
+        if not self.two_stage:
+            xavier_uniform_(self.reference_points.weight.data, gain=1.0)
+            constant_(self.reference_points.bias.data, 0.)
+        normal_(self.level_embed)
+
+    def get_proposal_pos_embed(self, proposals):
+        num_pos_feats = 128
+        temperature = 10000
+        scale = 2 * math.pi
+
+        dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)
+        dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
+        # N, L, 4
+        proposals = proposals.sigmoid() * scale
+        # N, L, 4, 128
+        pos = proposals[:, :, :, None] / dim_t
+        # N, L, 4, 64, 2
+        pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
+        return pos
+
+    def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):
+        N_, S_, C_ = memory.shape
+        base_scale = 4.0
+        proposals = []
+        _cur = 0
+        for lvl, (H_, W_) in enumerate(spatial_shapes):
+            mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)
+            valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
+            valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
+
+            grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
+                                            torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))
+            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
+
+            scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
+            grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
+            wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)
+            proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
+            proposals.append(proposal)
+            _cur += (H_ * W_)
+        output_proposals = torch.cat(proposals, 1)
+        output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
+        output_proposals = torch.log(output_proposals / (1 - output_proposals))
+        output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
+        output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
+
+        output_memory = memory
+        output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
+        output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
+        output_memory = self.enc_output_norm(self.enc_output(output_memory))
+        return output_memory, output_proposals
+
+    def get_valid_ratio(self, mask):
+        _, H, W = mask.shape
+        valid_H = torch.sum(~mask[:, :, 0], 1)
+        valid_W = torch.sum(~mask[:, 0, :], 1)
+        valid_ratio_h = valid_H.float() / H
+        valid_ratio_w = valid_W.float() / W
+        valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
+        return valid_ratio
+
+    def forward(self, srcs, masks, pos_embeds, query_embed=None):
+        assert self.two_stage or query_embed is not None
+
+        # prepare input for encoder
+        src_flatten = []
+        mask_flatten = []
+        lvl_pos_embed_flatten = []
+        spatial_shapes = []
+        for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
+            bs, c, h, w = src.shape
+            spatial_shape = (h, w)
+            spatial_shapes.append(spatial_shape)
+            src = src.flatten(2).transpose(1, 2)
+            mask = mask.flatten(1)
+            pos_embed = pos_embed.flatten(2).transpose(1, 2)
+            lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
+            lvl_pos_embed_flatten.append(lvl_pos_embed)
+            src_flatten.append(src)
+            mask_flatten.append(mask)
+        src_flatten = torch.cat(src_flatten, 1)
+        mask_flatten = torch.cat(mask_flatten, 1)
+        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
+        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
+        level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
+        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
+
+        # encoder
+        memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)
+
+        # prepare input for decoder
+        bs, _, c = memory.shape
+        if self.two_stage:
+            output_memory, output_proposals = self.gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes)
+
+            # hack implementation for two-stage Deformable DETR
+            enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](output_memory)
+            enc_outputs_coord_unact = self.decoder.bbox_embed[self.decoder.num_layers](output_memory) + output_proposals
+
+            topk = self.two_stage_num_proposals
+            topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]
+            topk_coords_unact = torch.gather(enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
+            topk_coords_unact = topk_coords_unact.detach()
+            reference_points = topk_coords_unact.sigmoid()
+            init_reference_out = reference_points
+            pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact)))
+            query_embed, tgt = torch.split(pos_trans_out, c, dim=2)
+        else:
+            query_embed, tgt = torch.split(query_embed, c, dim=1)
+            query_embed = query_embed.unsqueeze(0).expand(bs, -1, -1)
+            tgt = tgt.unsqueeze(0).expand(bs, -1, -1)
+            reference_points = self.reference_points(query_embed).sigmoid()
+            init_reference_out = reference_points
+
+        # decoder
+        hs, inter_references = self.decoder(tgt, reference_points, memory,
+                                            spatial_shapes, level_start_index, valid_ratios, query_embed, mask_flatten)
+
+        inter_references_out = inter_references
+        if self.two_stage:
+            return hs, init_reference_out, inter_references_out, enc_outputs_class, enc_outputs_coord_unact
+        return hs, init_reference_out, inter_references_out, None, None
+
+
+class DeformableTransformerEncoderLayer(nn.Module):
+    def __init__(self,
+                 d_model=256, d_ffn=1024,
+                 dropout=0.1, activation="relu",
+                 n_levels=4, n_heads=8, n_points=4):
+        super().__init__()
+
+        # self attention
+        self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
+        self.dropout1 = nn.Dropout(dropout)
+        self.norm1 = nn.LayerNorm(d_model)
+
+        # ffn
+        self.linear1 = nn.Linear(d_model, d_ffn)
+        self.activation = _get_activation_fn(activation)
+        self.dropout2 = nn.Dropout(dropout)
+        self.linear2 = nn.Linear(d_ffn, d_model)
+        self.dropout3 = nn.Dropout(dropout)
+        self.norm2 = nn.LayerNorm(d_model)
+
+    @staticmethod
+    def with_pos_embed(tensor, pos):
+        return tensor if pos is None else tensor + pos
+
+    def forward_ffn(self, src):
+        src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
+        src = src + self.dropout3(src2)
+        src = self.norm2(src)
+        return src
+
+    def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
+        # self attention
+        src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask)
+        src = src + self.dropout1(src2)
+        src = self.norm1(src)
+
+        # ffn
+        src = self.forward_ffn(src)
+
+        return src
+
+
+class DeformableTransformerEncoder(nn.Module):
+    def __init__(self, encoder_layer, num_layers):
+        super().__init__()
+        self.layers = _get_clones(encoder_layer, num_layers)
+        self.num_layers = num_layers
+
+    @staticmethod
+    def get_reference_points(spatial_shapes, valid_ratios, device):
+        reference_points_list = []
+        for lvl, (H_, W_) in enumerate(spatial_shapes):
+
+            ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
+                                          torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
+            ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
+            ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
+            ref = torch.stack((ref_x, ref_y), -1)
+            reference_points_list.append(ref)
+        reference_points = torch.cat(reference_points_list, 1)
+        reference_points = reference_points[:, :, None] * valid_ratios[:, None]
+        return reference_points
+
+    def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
+        output = src
+        reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
+        for _, layer in enumerate(self.layers):
+            output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
+
+        return output
+
+
+class DeformableTransformerDecoderLayer(nn.Module):
+    def __init__(self, d_model=256, d_ffn=1024,
+                 dropout=0.1, activation="relu",
+                 n_levels=4, n_heads=8, n_points=4):
+        super().__init__()
+
+        # cross attention
+        self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
+        self.dropout1 = nn.Dropout(dropout)
+        self.norm1 = nn.LayerNorm(d_model)
+
+        # self attention
+        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
+        self.dropout2 = nn.Dropout(dropout)
+        self.norm2 = nn.LayerNorm(d_model)
+
+        # ffn
+        self.linear1 = nn.Linear(d_model, d_ffn)
+        self.activation = _get_activation_fn(activation)
+        self.dropout3 = nn.Dropout(dropout)
+        self.linear2 = nn.Linear(d_ffn, d_model)
+        self.dropout4 = nn.Dropout(dropout)
+        self.norm3 = nn.LayerNorm(d_model)
+
+    @staticmethod
+    def with_pos_embed(tensor, pos):
+        return tensor if pos is None else tensor + pos
+
+    def forward_ffn(self, tgt):
+        tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
+        tgt = tgt + self.dropout4(tgt2)
+        tgt = self.norm3(tgt)
+        return tgt
+
+    def forward(self, tgt, query_pos, reference_points, src, src_spatial_shapes, level_start_index, src_padding_mask=None):
+        # self attention
+        q = k = self.with_pos_embed(tgt, query_pos)
+        tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)
+        tgt = tgt + self.dropout2(tgt2)
+        tgt = self.norm2(tgt)
+
+        # cross attention
+        tgt2 = self.cross_attn(self.with_pos_embed(tgt, query_pos),
+                               reference_points,
+                               src, src_spatial_shapes, level_start_index, src_padding_mask)
+        tgt = tgt + self.dropout1(tgt2)
+        tgt = self.norm1(tgt)
+
+        # ffn
+        tgt = self.forward_ffn(tgt)
+
+        return tgt
+
+
+class DeformableTransformerDecoder(nn.Module):
+    def __init__(self, decoder_layer, num_layers, return_intermediate=False):
+        super().__init__()
+        self.layers = _get_clones(decoder_layer, num_layers)
+        self.num_layers = num_layers
+        self.return_intermediate = return_intermediate
+        # hack implementation for iterative bounding box refinement and two-stage Deformable DETR
+        self.bbox_embed = None
+        self.class_embed = None
+
+    def forward(self, tgt, reference_points, src, src_spatial_shapes, src_level_start_index, src_valid_ratios,
+                query_pos=None, src_padding_mask=None):
+        output = tgt
+
+        intermediate = []
+        intermediate_reference_points = []
+        for lid, layer in enumerate(self.layers):
+            if reference_points.shape[-1] == 4:
+                reference_points_input = reference_points[:, :, None] \
+                                         * torch.cat([src_valid_ratios, src_valid_ratios], -1)[:, None]
+            else:
+                assert reference_points.shape[-1] == 2
+                reference_points_input = reference_points[:, :, None] * src_valid_ratios[:, None]
+            output = layer(output, query_pos, reference_points_input, src, src_spatial_shapes, src_level_start_index, src_padding_mask)
+
+            # hack implementation for iterative bounding box refinement
+            if self.bbox_embed is not None:
+                tmp = self.bbox_embed[lid](output)
+                if reference_points.shape[-1] == 4:
+                    new_reference_points = tmp + inverse_sigmoid(reference_points)
+                    new_reference_points = new_reference_points.sigmoid()
+                else:
+                    assert reference_points.shape[-1] == 2
+                    new_reference_points = tmp
+                    new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)
+                    new_reference_points = new_reference_points.sigmoid()
+                reference_points = new_reference_points.detach()
+
+            if self.return_intermediate:
+                intermediate.append(output)
+                intermediate_reference_points.append(reference_points)
+
+        if self.return_intermediate:
+            return torch.stack(intermediate), torch.stack(intermediate_reference_points)
+
+        return output, reference_points
+
+
+def _get_clones(module, N):
+    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+
+
+def _get_activation_fn(activation):
+    """Return an activation function given a string"""
+    if activation == "relu":
+        return F.relu
+    if activation == "gelu":
+        return F.gelu
+    if activation == "glu":
+        return F.glu
+    raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
+
+
+def build_deforamble_transformer(args):
+    return DeformableTransformer(
+        d_model=args.hidden_dim,
+        nhead=args.nheads,
+        num_encoder_layers=args.enc_layers,
+        num_decoder_layers=args.dec_layers,
+        dim_feedforward=args.dim_feedforward,
+        dropout=args.dropout,
+        activation="relu",
+        return_intermediate_dec=True,
+        num_feature_levels=args.num_feature_levels,
+        dec_n_points=args.dec_n_points,
+        enc_n_points=args.enc_n_points,
+        two_stage=args.two_stage,
+        two_stage_num_proposals=args.num_queries)
+
+
diff --git a/Deformable-DETR/models/matcher.py b/Deformable-DETR/models/matcher.py
new file mode 100644
index 0000000..63ef029
--- /dev/null
+++ b/Deformable-DETR/models/matcher.py
@@ -0,0 +1,102 @@
+# ------------------------------------------------------------------------
+# Deformable DETR
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------
+# Modified from DETR (https://github.com/facebookresearch/detr)
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+# ------------------------------------------------------------------------
+
+"""
+Modules to compute the matching cost and solve the corresponding LSAP.
+"""
+import torch
+from scipy.optimize import linear_sum_assignment
+from torch import nn
+
+from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
+
+
+class HungarianMatcher(nn.Module):
+    """This class computes an assignment between the targets and the predictions of the network
+
+    For efficiency reasons, the targets don't include the no_object. Because of this, in general,
+    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
+    while the others are un-matched (and thus treated as non-objects).
+    """
+
+    def __init__(self,
+                 cost_class: float = 1,
+                 cost_bbox: float = 1,
+                 cost_giou: float = 1):
+        """Creates the matcher
+
+        Params:
+            cost_class: This is the relative weight of the classification error in the matching cost
+            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
+            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
+        """
+        super().__init__()
+        self.cost_class = cost_class
+        self.cost_bbox = cost_bbox
+        self.cost_giou = cost_giou
+        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
+
+    def forward(self, outputs, targets):
+        """ Performs the matching
+
+        Params:
+            outputs: This is a dict that contains at least these entries:
+                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
+                 "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
+
+            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
+                 "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
+                           objects in the target) containing the class labels
+                 "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
+
+        Returns:
+            A list of size batch_size, containing tuples of (index_i, index_j) where:
+                - index_i is the indices of the selected predictions (in order)
+                - index_j is the indices of the corresponding selected targets (in order)
+            For each batch element, it holds:
+                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
+        """
+        with torch.no_grad():
+            bs, num_queries = outputs["pred_logits"].shape[:2]
+
+            # We flatten to compute the cost matrices in a batch
+            out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid()
+            out_bbox = outputs["pred_boxes"].flatten(0, 1)  # [batch_size * num_queries, 4]
+
+            # Also concat the target labels and boxes
+            tgt_ids = torch.cat([v["labels"] for v in targets])
+            tgt_bbox = torch.cat([v["boxes"] for v in targets])
+
+            # Compute the classification cost.
+            alpha = 0.25
+            gamma = 2.0
+            neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
+            pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
+            cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]
+
+            # Compute the L1 cost between boxes
+            cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
+
+            # Compute the giou cost betwen boxes
+            cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox),
+                                             box_cxcywh_to_xyxy(tgt_bbox))
+
+            # Final cost matrix
+            C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
+            C = C.view(bs, num_queries, -1).cpu()
+
+            sizes = [len(v["boxes"]) for v in targets]
+            indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
+            return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
+
+
+def build_matcher(args):
+    return HungarianMatcher(cost_class=args.set_cost_class,
+                            cost_bbox=args.set_cost_bbox,
+                            cost_giou=args.set_cost_giou)
diff --git a/Deformable-DETR/models/ops/functions/__init__.py b/Deformable-DETR/models/ops/functions/__init__.py
new file mode 100644
index 0000000..8a2197b
--- /dev/null
+++ b/Deformable-DETR/models/ops/functions/__init__.py
@@ -0,0 +1,10 @@
+# ------------------------------------------------------------------------------------------------
+# Deformable DETR
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------------------------------
+# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+# ------------------------------------------------------------------------------------------------
+
+from .ms_deform_attn_func import MSDeformAttnFunction
+
diff --git a/Deformable-DETR/models/ops/functions/ms_deform_attn_func.py b/Deformable-DETR/models/ops/functions/ms_deform_attn_func.py
new file mode 100644
index 0000000..8c5df8c
--- /dev/null
+++ b/Deformable-DETR/models/ops/functions/ms_deform_attn_func.py
@@ -0,0 +1,61 @@
+# ------------------------------------------------------------------------------------------------
+# Deformable DETR
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------------------------------
+# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+# ------------------------------------------------------------------------------------------------
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+import torch
+import torch.nn.functional as F
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+
+import MultiScaleDeformableAttention as MSDA
+
+
+class MSDeformAttnFunction(Function):
+    @staticmethod
+    def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
+        ctx.im2col_step = im2col_step
+        output = MSDA.ms_deform_attn_forward(
+            value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step)
+        ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(ctx, grad_output):
+        value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
+        grad_value, grad_sampling_loc, grad_attn_weight = \
+            MSDA.ms_deform_attn_backward(
+                value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)
+
+        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
+    # for debug and test only,
+    # need to use cuda version instead
+    N_, S_, M_, D_ = value.shape
+    _, Lq_, M_, L_, P_, _ = sampling_locations.shape
+    value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
+    sampling_grids = 2 * sampling_locations - 1
+    sampling_value_list = []
+    for lid_, (H_, W_) in enumerate(value_spatial_shapes):
+        # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
+        value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_)
+        # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
+        sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
+        # N_*M_, D_, Lq_, P_
+        sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_,
+                                          mode='bilinear', padding_mode='zeros', align_corners=False)
+        sampling_value_list.append(sampling_value_l_)
+    # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
+    attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
+    output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
+    return output.transpose(1, 2).contiguous()
diff --git a/Deformable-DETR/models/ops/make.sh b/Deformable-DETR/models/ops/make.sh
new file mode 100755
index 0000000..106b685
--- /dev/null
+++ b/Deformable-DETR/models/ops/make.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+# ------------------------------------------------------------------------------------------------
+# Deformable DETR
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------------------------------
+# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+# ------------------------------------------------------------------------------------------------
+
+python setup.py build install
diff --git a/Deformable-DETR/models/ops/modules/__init__.py b/Deformable-DETR/models/ops/modules/__init__.py
new file mode 100644
index 0000000..f82cb1a
--- /dev/null
+++ b/Deformable-DETR/models/ops/modules/__init__.py
@@ -0,0 +1,9 @@
+# ------------------------------------------------------------------------------------------------
+# Deformable DETR
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------------------------------
+# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+# ------------------------------------------------------------------------------------------------
+
+from .ms_deform_attn import MSDeformAttn
diff --git a/Deformable-DETR/models/ops/modules/ms_deform_attn.py b/Deformable-DETR/models/ops/modules/ms_deform_attn.py
new file mode 100644
index 0000000..663d64a
--- /dev/null
+++ b/Deformable-DETR/models/ops/modules/ms_deform_attn.py
@@ -0,0 +1,115 @@
+# ------------------------------------------------------------------------------------------------
+# Deformable DETR
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------------------------------
+# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+# ------------------------------------------------------------------------------------------------
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+import warnings
+import math
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+from torch.nn.init import xavier_uniform_, constant_
+
+from ..functions import MSDeformAttnFunction
+
+
+def _is_power_of_2(n):
+    if (not isinstance(n, int)) or (n < 0):
+        raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
+    return (n & (n-1) == 0) and n != 0
+
+
+class MSDeformAttn(nn.Module):
+    def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
+        """
+        Multi-Scale Deformable Attention Module
+        :param d_model      hidden dimension
+        :param n_levels     number of feature levels
+        :param n_heads      number of attention heads
+        :param n_points     number of sampling points per attention head per feature level
+        """
+        super().__init__()
+        if d_model % n_heads != 0:
+            raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
+        _d_per_head = d_model // n_heads
+        # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
+        if not _is_power_of_2(_d_per_head):
+            warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
+                          "which is more efficient in our CUDA implementation.")
+
+        self.im2col_step = 64
+
+        self.d_model = d_model
+        self.n_levels = n_levels
+        self.n_heads = n_heads
+        self.n_points = n_points
+
+        self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
+        self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
+        self.value_proj = nn.Linear(d_model, d_model)
+        self.output_proj = nn.Linear(d_model, d_model)
+
+        self._reset_parameters()
+
+    def _reset_parameters(self):
+        constant_(self.sampling_offsets.weight.data, 0.)
+        thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
+        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
+        grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
+        for i in range(self.n_points):
+            grid_init[:, :, i, :] *= i + 1
+        with torch.no_grad():
+            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
+        constant_(self.attention_weights.weight.data, 0.)
+        constant_(self.attention_weights.bias.data, 0.)
+        xavier_uniform_(self.value_proj.weight.data)
+        constant_(self.value_proj.bias.data, 0.)
+        xavier_uniform_(self.output_proj.weight.data)
+        constant_(self.output_proj.bias.data, 0.)
+
+    def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
+        """
+        :param query                       (N, Length_{query}, C)
+        :param reference_points            (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
+                                        or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
+        :param input_flatten               (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
+        :param input_spatial_shapes        (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
+        :param input_level_start_index     (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
+        :param input_padding_mask          (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
+
+        :return output                     (N, Length_{query}, C)
+        """
+        N, Len_q, _ = query.shape
+        N, Len_in, _ = input_flatten.shape
+        assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
+
+        value = self.value_proj(input_flatten)
+        if input_padding_mask is not None:
+            value = value.masked_fill(input_padding_mask[..., None], float(0))
+        value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
+        sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
+        attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
+        attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
+        # N, Len_q, n_heads, n_levels, n_points, 2
+        if reference_points.shape[-1] == 2:
+            offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
+            sampling_locations = reference_points[:, :, None, :, None, :] \
+                                 + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
+        elif reference_points.shape[-1] == 4:
+            sampling_locations = reference_points[:, :, None, :, None, :2] \
+                                 + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
+        else:
+            raise ValueError(
+                'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
+        output = MSDeformAttnFunction.apply(
+            value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
+        output = self.output_proj(output)
+        return output
diff --git a/Deformable-DETR/models/ops/setup.py b/Deformable-DETR/models/ops/setup.py
new file mode 100644
index 0000000..a0131bc
--- /dev/null
+++ b/Deformable-DETR/models/ops/setup.py
@@ -0,0 +1,71 @@
+# ------------------------------------------------------------------------------------------------
+# Deformable DETR
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ==================================================================================
# Deformable-DETR/models/ops/setup.py
# ==================================================================================
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------

import glob
import os

import torch
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension

from setuptools import find_packages
from setuptools import setup

requirements = ["torch", "torchvision"]


def get_extensions():
    """Collect the C++/CUDA sources and return the extension module list.

    Returns a single-element list holding the ``MultiScaleDeformableAttention``
    extension. Raises NotImplementedError when no usable CUDA toolchain is
    found, since the op has no CPU implementation.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "src")

    main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
    source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
    source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))

    sources = main_file + source_cpu
    extension = CppExtension
    extra_compile_args = {"cxx": []}
    define_macros = []

    if torch.cuda.is_available() and CUDA_HOME is not None:
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [("WITH_CUDA", None)]
        # Disable half-precision operator overloads to keep the kernels FP32-only.
        extra_compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]
    else:
        # Fixed typo in the error message ("availabel" -> "available").
        raise NotImplementedError('Cuda is not available')

    sources = [os.path.join(extensions_dir, s) for s in sources]
    include_dirs = [extensions_dir]
    ext_modules = [
        extension(
            "MultiScaleDeformableAttention",
            sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
    return ext_modules


setup(
    name="MultiScaleDeformableAttention",
    version="1.0",
    author="Weijie Su",
    url="https://github.com/fundamentalvision/Deformable-DETR",
    description="PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention",
    packages=find_packages(exclude=("configs", "tests",)),
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)

# ==================================================================================
# Deformable-DETR/models/ops/src/cpu/ms_deform_attn_cpu.cpp  (C++ translation unit)
# NOTE(review): the patch text shows bare "#include" tokens -- header names were
# stripped by angle-bracket mangling; reconstructed below.
# ==================================================================================
# /*!
# **************************************************************************************************
# * Deformable DETR
# * Copyright (c) 2020 SenseTime. All Rights Reserved.
# * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# **************************************************************************************************
# * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# **************************************************************************************************
# */
#
# #include <vector>
#
# #include <ATen/ATen.h>
# #include <ATen/cuda/CUDAContext.h>
#
#
# // CPU stub: the op is CUDA-only; calling this always aborts.
# at::Tensor
# ms_deform_attn_cpu_forward(
#     const at::Tensor &value,
#     const at::Tensor &spatial_shapes,
#     const at::Tensor &level_start_index,
#     const at::Tensor &sampling_loc,
#     const at::Tensor &attn_weight,
#     const int im2col_step)
# {
#     AT_ERROR("Not implement on cpu");
# }
#
# // CPU stub: the op is CUDA-only; calling this always aborts.
# std::vector<at::Tensor>
# ms_deform_attn_cpu_backward(
#     const at::Tensor &value,
#     const at::Tensor &spatial_shapes,
#     const at::Tensor &level_start_index,
#     const at::Tensor &sampling_loc,
#     const at::Tensor &attn_weight,
#     const at::Tensor &grad_output,
#     const int im2col_step)
# {
#     AT_ERROR("Not implement on cpu");
# }
+************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once +#include + +at::Tensor +ms_deform_attn_cpu_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step); + +std::vector +ms_deform_attn_cpu_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step); + + diff --git a/Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.cu b/Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.cu new file mode 100644 index 0000000..d6d5836 --- /dev/null +++ b/Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.cu @@ -0,0 +1,153 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. 
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include +#include "cuda/ms_deform_im2col_cuda.cuh" + +#include +#include +#include +#include + + +at::Tensor ms_deform_attn_cuda_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step) +{ + AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); + AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); + AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); + AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); + AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); + + AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); + AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); + AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); + AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); + AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); + + const int batch = value.size(0); + const int spatial_size = value.size(1); + const int num_heads = value.size(2); + const int channels = value.size(3); + + const int num_levels = spatial_shapes.size(0); + + const int num_query = sampling_loc.size(1); + const int num_point = sampling_loc.size(4); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide 
im2col_step(%d)", batch, im2col_step_); + + auto output = at::zeros({batch, num_query, num_heads, channels}, value.options()); + + const int batch_n = im2col_step_; + auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); + auto per_value_size = spatial_size * num_heads * channels; + auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; + auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto columns = output_n.select(0, n); + AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] { + ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), + value.data() + n * im2col_step_ * per_value_size, + spatial_shapes.data(), + level_start_index.data(), + sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + attn_weight.data() + n * im2col_step_ * per_attn_weight_size, + batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, + columns.data()); + + })); + } + + output = output.view({batch, num_query, num_heads*channels}); + + return output; +} + + +std::vector ms_deform_attn_cuda_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step) +{ + + AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); + AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); + AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); + AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); + AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); + AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous"); + + AT_ASSERTM(value.type().is_cuda(), "value must 
be a CUDA tensor"); + AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); + AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); + AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); + AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); + AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor"); + + const int batch = value.size(0); + const int spatial_size = value.size(1); + const int num_heads = value.size(2); + const int channels = value.size(3); + + const int num_levels = spatial_shapes.size(0); + + const int num_query = sampling_loc.size(1); + const int num_point = sampling_loc.size(4); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + + auto grad_value = at::zeros_like(value); + auto grad_sampling_loc = at::zeros_like(sampling_loc); + auto grad_attn_weight = at::zeros_like(attn_weight); + + const int batch_n = im2col_step_; + auto per_value_size = spatial_size * num_heads * channels; + auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; + auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; + auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); + + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto grad_output_g = grad_output_n.select(0, n); + AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] { + ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(), + grad_output_g.data(), + value.data() + n * im2col_step_ * per_value_size, + spatial_shapes.data(), + level_start_index.data(), + sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + attn_weight.data() + n * im2col_step_ * per_attn_weight_size, + batch_n, spatial_size, num_heads, channels, num_levels, 
num_query, num_point, + grad_value.data() + n * im2col_step_ * per_value_size, + grad_sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + grad_attn_weight.data() + n * im2col_step_ * per_attn_weight_size); + + })); + } + + return { + grad_value, grad_sampling_loc, grad_attn_weight + }; +} \ No newline at end of file diff --git a/Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.h b/Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.h new file mode 100644 index 0000000..c7ae53f --- /dev/null +++ b/Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.h @@ -0,0 +1,30 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once +#include + +at::Tensor ms_deform_attn_cuda_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step); + +std::vector ms_deform_attn_cuda_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step); + diff --git a/Deformable-DETR/models/ops/src/cuda/ms_deform_im2col_cuda.cuh b/Deformable-DETR/models/ops/src/cuda/ms_deform_im2col_cuda.cuh new file mode 100644 index 0000000..6bc2acb --- /dev/null +++ b/Deformable-DETR/models/ops/src/cuda/ms_deform_im2col_cuda.cuh @@ -0,0 +1,1327 @@ +/*! 
+************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************** +* Modified from DCN (https://github.com/msracver/Deformable-ConvNets) +* Copyright (c) 2018 Microsoft +************************************************************************** +*/ + +#include +#include +#include + +#include +#include + +#include + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N, const int num_threads) +{ + return (N + num_threads - 1) / num_threads; +} + + +template +__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + } + scalar_t v3 = 0; + 
if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + } + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + 
w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + *grad_attn_weight = top_grad * val; + *grad_sampling_loc = width * grad_w_weight * top_grad_value; + *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + 
const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + atomicAdd(grad_attn_weight, top_grad * val); + atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value); + atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value); +} + + +template +__global__ void ms_deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= 
channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + scalar_t *data_col_ptr = data_col + index; + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + scalar_t col = 0; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride); + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight; + } + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + } + } + *data_col_ptr = col; + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, 
+ scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + 
*(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockSize; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= 
num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    // Per-sample bases: each (query, head) owns num_levels*num_point
    // attention weights and twice as many (x, y) sampling coordinates.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col = 0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col = 0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        // Normalized [0, 1] coordinates -> pixel coordinates (align_corners=False style).
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's shared-memory slots so out-of-bounds samples
        // contribute nothing to the reduction below.
        *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight + threadIdx.x) = 0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
              top_grad, weight, grad_value_ptr,
              cache_grad_sampling_loc + (threadIdx.x << 1), cache_grad_attn_weight + threadIdx.x);
        }

        __syncthreads();

        // Power-of-two tree reduction over the block (blockSize is a template
        // parameter here, so no remainder handling is needed).
        for (unsigned int s = blockSize / 2; s > 0; s >>= 1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
          }
          __syncthreads();
        }

        if (tid == 0)
        {
          *grad_sampling_loc = cache_grad_sampling_loc[0];
          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
          *grad_attn_weight = cache_grad_attn_weight[0];
        }
        __syncthreads();

        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}


// Backward (col2im) kernel using dynamically-sized shared memory.  After every
// thread writes its partial gradients to shared memory, thread 0 reduces them
// serially (v1 strategy: simplest reduction, used for small channel counts).
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(
    const int n,
    const scalar_t *grad_col,
    const scalar_t *data_value,
    const int64_t *data_spatial_shapes,
    const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc,
    const scalar_t *data_attn_weight,
    const int batch_size,
    const int spatial_size,
    const int num_heads,
    const int channels,
    const int num_levels,
    const int num_query,
    const int num_point,
    scalar_t *grad_value,
    scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Dynamic shared memory layout: [2 * blockDim.x] sampling-loc partials
    // followed by [blockDim.x] attention-weight partials.
    extern __shared__ int _s[];
    scalar_t *cache_grad_sampling_loc = (scalar_t *)_s;
    scalar_t *cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose the flat index into (batch, query, head, channel).
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // (unused, kept for clarity of the decomposition)
    _temp /= num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col = 0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col = 0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight + threadIdx.x) = 0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
              top_grad, weight, grad_value_ptr,
              cache_grad_sampling_loc + (threadIdx.x << 1), cache_grad_attn_weight + threadIdx.x);
        }

        __syncthreads();
        if (tid == 0)
        {
          scalar_t _grad_w = cache_grad_sampling_loc[0];
          scalar_t _grad_h = cache_grad_sampling_loc[1];
          scalar_t _grad_a = cache_grad_attn_weight[0];
          int sid = 2;
          // NOTE: the upstream code shadowed `tid` with this loop variable;
          // renamed to `t` here — same iteration space, no behavior change.
          for (unsigned int t = 1; t < blockDim.x; ++t)
          {
            _grad_w += cache_grad_sampling_loc[sid];
            _grad_h += cache_grad_sampling_loc[sid + 1];
            _grad_a += cache_grad_attn_weight[t];
            sid += 2;
          }

          *grad_sampling_loc = _grad_w;
          *(grad_sampling_loc + 1) = _grad_h;
          *grad_attn_weight = _grad_a;
        }
        __syncthreads();

        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}

// Same as v1, but the shared-memory partials are combined with a parallel tree
// reduction (v2 strategy); the `spre` term folds in the odd remainder when
// blockDim.x is not a power of two.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(
    const int n,
    const scalar_t *grad_col,
    const scalar_t *data_value,
    const int64_t *data_spatial_shapes,
    const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc,
    const scalar_t *data_attn_weight,
    const int batch_size,
    const int spatial_size,
    const int num_heads,
    const int channels,
    const int num_levels,
    const int num_query,
    const int num_point,
    scalar_t *grad_value,
    scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    extern __shared__ int _s[];
    scalar_t *cache_grad_sampling_loc = (scalar_t *)_s;
    scalar_t *cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // (unused, kept for clarity of the decomposition)
    _temp /= num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col = 0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col = 0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight + threadIdx.x) = 0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
              top_grad, weight, grad_value_ptr,
              cache_grad_sampling_loc + (threadIdx.x << 1), cache_grad_attn_weight + threadIdx.x);
        }

        __syncthreads();

        for (unsigned int s = blockDim.x / 2, spre = blockDim.x; s > 0; s >>= 1, spre >>= 1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
            if (tid + (s << 1) < spre)
            {
              // Odd-sized stage: fold the orphan element into slot `tid`.
              cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
              cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
              cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
            }
          }
          __syncthreads();
        }

        if (tid == 0)
        {
          *grad_sampling_loc = cache_grad_sampling_loc[0];
          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
          *grad_attn_weight = cache_grad_attn_weight[0];
        }
        __syncthreads();

        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Same tree reduction as shm_reduce_v2, but the per-block result is combined
// into global memory with atomicAdd: used when channels > 1024, i.e. when one
// (query, head) spans multiple thread blocks that all contribute to the same
// gradient slots.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(
    const int n,
    const scalar_t *grad_col,
    const scalar_t *data_value,
    const int64_t *data_spatial_shapes,
    const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc,
    const scalar_t *data_attn_weight,
    const int batch_size,
    const int spatial_size,
    const int num_heads,
    const int channels,
    const int num_levels,
    const int num_query,
    const int num_point,
    scalar_t *grad_value,
    scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Dynamic shared memory: [2 * blockDim.x] loc partials + [blockDim.x] weight partials.
    extern __shared__ int _s[];
    scalar_t *cache_grad_sampling_loc = (scalar_t *)_s;
    scalar_t *cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose the flat index into (batch, query, head, channel).
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // (unused, kept for clarity of the decomposition)
    _temp /= num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col = 0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col = 0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight + threadIdx.x) = 0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
              top_grad, weight, grad_value_ptr,
              cache_grad_sampling_loc + (threadIdx.x << 1), cache_grad_attn_weight + threadIdx.x);
        }

        __syncthreads();

        for (unsigned int s = blockDim.x / 2, spre = blockDim.x; s > 0; s >>= 1, spre >>= 1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
            if (tid + (s << 1) < spre)
            {
              cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
              cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
              cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
            }
          }
          __syncthreads();
        }

        if (tid == 0)
        {
          // Multiple blocks may target the same slot -> atomic accumulation.
          atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]);
          atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]);
          atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]);
        }
        __syncthreads();

        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}


// Fallback backward kernel with no shared memory: gradients are accumulated
// straight into global memory via the *_gm bilinear helper.
template <typename scalar_t>
__global__ void
ms_deformable_col2im_gpu_kernel_gm(
    const int n,
    const scalar_t *grad_col,
    const scalar_t *data_value,
    const int64_t *data_spatial_shapes,
    const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc,
    const scalar_t *data_attn_weight,
    const int batch_size,
    const int spatial_size,
    const int num_heads,
    const int channels,
    const int num_levels,
    const int num_query,
    const int num_point,
    scalar_t *grad_value,
    scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Decompose the flat index into (batch, query, head, channel).
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // (unused, kept for clarity of the decomposition)
    _temp /= num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col = 0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col = 0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear_gm(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
              top_grad, weight, grad_value_ptr,
              grad_sampling_loc, grad_attn_weight);
        }
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}


// Host-side launcher for the forward (im2col) kernel.
// NOTE(review): the launch configuration below is reconstructed — the original
// patch text had the <<<...>>> spans stripped; confirm against upstream.
template <typename scalar_t>
void ms_deformable_im2col_cuda(cudaStream_t stream,
                               const scalar_t *data_value,
                               const int64_t *data_spatial_shapes,
                               const int64_t *data_level_start_index,
                               const scalar_t *data_sampling_loc,
                               const scalar_t *data_attn_weight,
                               const int batch_size,
                               const int spatial_size,
                               const int num_heads,
                               const int channels,
                               const int num_levels,
                               const int num_query,
                               const int num_point,
                               scalar_t *data_col)
{
  const int num_kernels = batch_size * num_query * num_heads * channels;
  const int num_actual_kernels = batch_size * num_query * num_heads * channels;
  const int num_threads = CUDA_NUM_THREADS;
  ms_deformable_im2col_gpu_kernel<scalar_t>
      <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
          num_kernels, data_value, data_spatial_shapes, data_level_start_index,
          data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
          channels, num_levels, num_query, num_point, data_col);

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
  }
}

// Host-side launcher for the backward (col2im) kernels.  Dispatches to the
// reduction variant best suited to the channel count:
//   channels <= 1024 and a power of two  -> blocksize-aware kernels (static shmem)
//   channels <= 1024, not a power of two -> shm_reduce_v1/v2 (dynamic shmem)
//   channels >  1024, multiple of 1024   -> multi-block kernel with atomics
//   otherwise                            -> plain global-memory kernel
template <typename scalar_t>
void ms_deformable_col2im_cuda(cudaStream_t stream,
                               const scalar_t *grad_col,
                               const scalar_t *data_value,
                               const int64_t *data_spatial_shapes,
                               const int64_t *data_level_start_index,
                               const scalar_t *data_sampling_loc,
                               const scalar_t *data_attn_weight,
                               const int batch_size,
                               const int spatial_size,
                               const int num_heads,
                               const int channels,
                               const int num_levels,
                               const int num_query,
                               const int num_point,
                               scalar_t *grad_value,
                               scalar_t *grad_sampling_loc,
                               scalar_t *grad_attn_weight)
{
  const int num_threads = (channels > CUDA_NUM_THREADS) ? CUDA_NUM_THREADS : channels;
  const int num_kernels = batch_size * num_query * num_heads * channels;
  const int num_actual_kernels = batch_size * num_query * num_heads * channels;
  if (channels > 1024)
  {
    if ((channels & 1023) == 0)
    {
      ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t>
          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
             num_threads * 3 * sizeof(scalar_t), stream>>>(
              num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
              data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
              channels, num_levels, num_query, num_point,
              grad_value, grad_sampling_loc, grad_attn_weight);
    }
    else
    {
      ms_deformable_col2im_gpu_kernel_gm<scalar_t>
          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
              num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
              data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
              channels, num_levels, num_query, num_point,
              grad_value, grad_sampling_loc, grad_attn_weight);
    }
  }
  else{
    switch(channels)
    {
      case 1:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 1>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      case 2:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 2>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      case 4:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 4>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      case 8:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 8>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      case 16:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 16>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      case 32:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 32>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      case 64:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 64>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      case 128:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 128>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      case 256:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 256>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      case 512:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 512>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      case 1024:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 1024>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
                num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                channels, num_levels, num_query, num_point,
                grad_value, grad_sampling_loc, grad_attn_weight);
        break;
      default:
        if (channels < 64)
        {
          ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t>
              <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                 num_threads * 3 * sizeof(scalar_t), stream>>>(
                  num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                  data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                  channels, num_levels, num_query, num_point,
                  grad_value, grad_sampling_loc, grad_attn_weight);
        }
        else
        {
          ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t>
              <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                 num_threads * 3 * sizeof(scalar_t), stream>>>(
                  num_kernels, grad_col, data_value, data_spatial_shapes, data_level_start_index,
                  data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads,
                  channels, num_levels, num_query, num_point,
                  grad_value, grad_sampling_loc,
grad_attn_weight); + } + } + } + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } + +} \ No newline at end of file diff --git a/Deformable-DETR/models/ops/src/ms_deform_attn.h b/Deformable-DETR/models/ops/src/ms_deform_attn.h new file mode 100644 index 0000000..ac0ef2e --- /dev/null +++ b/Deformable-DETR/models/ops/src/ms_deform_attn.h @@ -0,0 +1,62 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once + +#include "cpu/ms_deform_attn_cpu.h" + +#ifdef WITH_CUDA +#include "cuda/ms_deform_attn_cuda.h" +#endif + + +at::Tensor +ms_deform_attn_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step) +{ + if (value.type().is_cuda()) + { +#ifdef WITH_CUDA + return ms_deform_attn_cuda_forward( + value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector +ms_deform_attn_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step) +{ + if (value.type().is_cuda()) + { +#ifdef WITH_CUDA + return 
ms_deform_attn_cuda_backward( + value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + diff --git a/Deformable-DETR/models/ops/src/vision.cpp b/Deformable-DETR/models/ops/src/vision.cpp new file mode 100644 index 0000000..2201f63 --- /dev/null +++ b/Deformable-DETR/models/ops/src/vision.cpp @@ -0,0 +1,16 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include "ms_deform_attn.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward"); + m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward"); +} diff --git a/Deformable-DETR/models/ops/test.py b/Deformable-DETR/models/ops/test.py new file mode 100644 index 0000000..8dbf6d5 --- /dev/null +++ b/Deformable-DETR/models/ops/test.py @@ -0,0 +1,89 @@ +# ------------------------------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. 
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------------------------------ +# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +# ------------------------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import division + +import time +import torch +import torch.nn as nn +from torch.autograd import gradcheck + +from functions.ms_deform_attn_func import MSDeformAttnFunction, ms_deform_attn_core_pytorch + + +N, M, D = 1, 2, 2 +Lq, L, P = 2, 2, 2 +shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda() +level_start_index = torch.cat((shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1])) +S = sum([(H*W).item() for H, W in shapes]) + + +torch.manual_seed(3) + + +@torch.no_grad() +def check_forward_equal_with_pytorch_double(): + value = torch.rand(N, S, M, D).cuda() * 0.01 + sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda() + attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5 + attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True) + im2col_step = 2 + output_pytorch = ms_deform_attn_core_pytorch(value.double(), shapes, sampling_locations.double(), attention_weights.double()).detach().cpu() + output_cuda = MSDeformAttnFunction.apply(value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step).detach().cpu() + fwdok = torch.allclose(output_cuda, output_pytorch) + max_abs_err = (output_cuda - output_pytorch).abs().max() + max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max() + + print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +@torch.no_grad() +def check_forward_equal_with_pytorch_float(): + 
value = torch.rand(N, S, M, D).cuda() * 0.01 + sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda() + attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5 + attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True) + im2col_step = 2 + output_pytorch = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights).detach().cpu() + output_cuda = MSDeformAttnFunction.apply(value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step).detach().cpu() + fwdok = torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3) + max_abs_err = (output_cuda - output_pytorch).abs().max() + max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max() + + print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True): + + value = torch.rand(N, S, M, channels).cuda() * 0.01 + sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda() + attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5 + attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True) + im2col_step = 2 + func = MSDeformAttnFunction.apply + + value.requires_grad = grad_value + sampling_locations.requires_grad = grad_sampling_loc + attention_weights.requires_grad = grad_attn_weight + + gradok = gradcheck(func, (value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step)) + + print(f'* {gradok} check_gradient_numerical(D={channels})') + + +if __name__ == '__main__': + check_forward_equal_with_pytorch_double() + check_forward_equal_with_pytorch_float() + + for channels in [30, 32, 64, 71, 1025, 2048, 3096]: + check_gradient_numerical(channels, True, True, True) + + + diff --git a/Deformable-DETR/models/position_encoding.py b/Deformable-DETR/models/position_encoding.py new 
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------

"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn

from util.misc import NestedTensor


class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.
    """
    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        # A custom scale only makes sense on normalized coordinates.
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, tensor_list: NestedTensor):
        """Return a (B, 2*num_pos_feats, H, W) sine/cosine position embedding.

        `tensor_list.mask` marks padded pixels (True = padding); cumulative sums
        over the valid region give each pixel its (y, x) position index.
        """
        x = tensor_list.tensors
        mask = tensor_list.mask
        assert mask is not None
        not_mask = ~mask
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            # Shift by 0.5 to center positions, then normalize by the extent
            # of the valid region and rescale.
            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale

        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)

        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin on even channels and cos on odd channels.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos


class PositionEmbeddingLearned(nn.Module):
    """
    Absolute pos embedding, learned.
    """
    def __init__(self, num_pos_feats=256):
        super().__init__()
        # 50 rows / 50 columns: assumes feature maps no larger than 50x50 —
        # TODO confirm against the backbone's output resolution.
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, tensor_list: NestedTensor):
        """Return a (B, 2*num_pos_feats, H, W) learned position embedding."""
        x = tensor_list.tensors
        h, w = x.shape[-2:]
        i = torch.arange(w, device=x.device)
        j = torch.arange(h, device=x.device)
        x_emb = self.col_embed(i)
        y_emb = self.row_embed(j)
        pos = torch.cat([
            x_emb.unsqueeze(0).repeat(h, 1, 1),
            y_emb.unsqueeze(1).repeat(1, w, 1),
        ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
        return pos


def build_position_encoding(args):
    """Build the position-encoding module selected by ``args.position_embedding``.

    ``'v2'``/``'sine'`` -> :class:`PositionEmbeddingSine` (normalized);
    ``'v3'``/``'learned'`` -> :class:`PositionEmbeddingLearned`.
    Raises ``ValueError`` for any other value.
    """
    N_steps = args.hidden_dim // 2
    if args.position_embedding in ('v2', 'sine'):
        # TODO find a better way of exposing other arguments
        position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
    elif args.position_embedding in ('v3', 'learned'):
        position_embedding = PositionEmbeddingLearned(N_steps)
    else:
        raise ValueError(f"not supported {args.position_embedding}")

    return position_embedding

# --- file: Deformable-DETR/models/segmentation.py ---
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------

"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
import io
from collections import defaultdict

import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image

import util.box_ops as box_ops
from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list

try:
    from panopticapi.utils import id2rgb, rgb2id
except ImportError:
    # panopticapi is optional; only PostProcessPanoptic needs it.
    pass


class DETRsegm(nn.Module):
    """Wraps a trained DETR detector and adds a mask-prediction head on top."""

    def __init__(self, detr, freeze_detr=False):
        super().__init__()
        self.detr = detr

        # NOTE: at this point only `self.detr` is registered, so this freezes
        # exactly the wrapped detector's parameters, not the heads added below.
        if freeze_detr:
            for p in self.parameters():
                p.requires_grad_(False)

        hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
        self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0)
        self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)

    def forward(self, samples: NestedTensor):
        if not isinstance(samples, NestedTensor):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.detr.backbone(samples)

        bs = features[-1].tensors.shape[0]

        src, mask = features[-1].decompose()
        src_proj = self.detr.input_proj(src)
        hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])

        outputs_class = self.detr.class_embed(hs)
        outputs_coord = self.detr.bbox_embed(hs).sigmoid()
        out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
        if self.detr.aux_loss:
            out["aux_outputs"] = [
                {"pred_logits": a, "pred_boxes": b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
            ]

        # FIXME h_boxes takes the last one computed, keep this in mind
        bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)

        seg_masks = self.mask_head(src_proj, bbox_mask,
                                   [features[2].tensors, features[1].tensors, features[0].tensors])
        outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries,
                                           seg_masks.shape[-2], seg_masks.shape[-1])

        out["pred_masks"] = outputs_seg_masks
        return out


class MaskHeadSmallConv(nn.Module):
    """
    Simple convolutional head, using group norm.
    Upsampling is done using a FPN approach
    """

    def __init__(self, dim, fpn_dims, context_dim):
        super().__init__()

        inter_dims = [dim, context_dim // 2, context_dim // 4,
                      context_dim // 8, context_dim // 16, context_dim // 64]
        self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
        self.gn1 = torch.nn.GroupNorm(8, dim)
        self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
        self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
        self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
        self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
        self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
        self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
        self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
        self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
        self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)

        self.dim = dim

        # 1x1 adapters project each FPN level to the matching intermediate width.
        self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
        self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
        self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x, bbox_mask, fpns):
        def expand(tensor, length):
            # Repeat along a new query axis, then fold it into the batch axis.
            return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)

        # Concatenate the projected features with the per-query attention maps.
        x = torch.cat([expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)

        x = self.lay1(x)
        x = self.gn1(x)
        x = F.relu(x)
        x = self.lay2(x)
        x = self.gn2(x)
        x = F.relu(x)

        cur_fpn = self.adapter1(fpns[0])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0))
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay3(x)
        x = self.gn3(x)
        x = F.relu(x)

        cur_fpn = self.adapter2(fpns[1])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0))
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay4(x)
        x = self.gn4(x)
        x = F.relu(x)

        cur_fpn = self.adapter3(fpns[2])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0))
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay5(x)
        x = self.gn5(x)
        x = F.relu(x)

        return self.out_lay(x)


class MHAttentionMap(nn.Module):
    """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""

    def __init__(self, query_dim, hidden_dim, num_heads, dropout=0, bias=True):
        super().__init__()
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.dropout = nn.Dropout(dropout)

        self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
        self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)

        nn.init.zeros_(self.k_linear.bias)
        nn.init.zeros_(self.q_linear.bias)
        nn.init.xavier_uniform_(self.k_linear.weight)
        nn.init.xavier_uniform_(self.q_linear.weight)
        self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5

    def forward(self, q, k, mask=None):
        q = self.q_linear(q)
        # Applying k_linear as a 1x1 convolution keeps k in (B, C, H, W) layout.
        k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)
        qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads)
        kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1])
        weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)

        if mask is not None:
            # Padded positions never receive attention.
            weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf"))
        weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights)
        weights = self.dropout(weights)
        return weights


def dice_loss(inputs, targets, num_boxes):
    """
    Compute the DICE loss, similar to generalized IOU for masks
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
    """
    inputs = inputs.sigmoid()
    inputs = inputs.flatten(1)
    numerator = 2 * (inputs * targets).sum(1)
    denominator = inputs.sum(-1) + targets.sum(-1)
    # +1 smoothing keeps the loss defined for empty masks.
    loss = 1 - (numerator + 1) / (denominator + 1)
    return loss.sum() / num_boxes


def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                 (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples. Default: 0.25; pass a negative
               value to disable the weighting.
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
    Returns:
        Loss tensor
    """
    prob = inputs.sigmoid()
    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    p_t = prob * targets + (1 - prob) * (1 - targets)
    loss = ce_loss * ((1 - p_t) ** gamma)

    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss

    return loss.mean(1).sum() / num_boxes


class PostProcessSegm(nn.Module):
    """Resizes and binarizes predicted masks back to the original image sizes."""

    def __init__(self, threshold=0.5):
        super().__init__()
        self.threshold = threshold

    @torch.no_grad()
    def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
        assert len(orig_target_sizes) == len(max_target_sizes)
        max_h, max_w = max_target_sizes.max(0)[0].tolist()
        outputs_masks = outputs["pred_masks"].squeeze(2)
        outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w),
                                      mode="bilinear", align_corners=False)
        outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()

        for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
            img_h, img_w = t[0], t[1]
            # Crop away the batch padding, then rescale to the original size.
            results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
            results[i]["masks"] = F.interpolate(
                results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
            ).byte()

        return results


class PostProcessPanoptic(nn.Module):
    """This class converts the output of the model to the final panoptic result, in the format expected by the
    coco panoptic API """

    def __init__(self, is_thing_map, threshold=0.85):
        """
        Parameters:
           is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether
                          the class is a thing (True) or a stuff (False) class
           threshold: confidence threshold: segments with confidence lower than this will be deleted
        """
        super().__init__()
        self.threshold = threshold
        self.is_thing_map = is_thing_map

    def forward(self, outputs, processed_sizes, target_sizes=None):
        """ This function computes the panoptic prediction from the model's predictions.
        Parameters:
            outputs: This is a dict coming directly from the model. See the model doc for the content.
            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
                             model, ie the size after data augmentation but before batching.
            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
                          of each prediction. If left to None, it will default to the processed_sizes
        """
        if target_sizes is None:
            target_sizes = processed_sizes
        assert len(processed_sizes) == len(target_sizes)
        out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
        assert len(out_logits) == len(raw_masks) == len(target_sizes)
        preds = []

        def to_tuple(tup):
            if isinstance(tup, tuple):
                return tup
            return tuple(tup.cpu().tolist())

        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
        ):
            # we filter empty queries and detection below threshold
            scores, labels = cur_logits.softmax(-1).max(-1)
            keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
            cur_scores = cur_scores[keep]
            cur_classes = cur_classes[keep]
            cur_masks = cur_masks[keep]
            cur_masks = interpolate(cur_masks[None], to_tuple(size), mode="bilinear").squeeze(0)
            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])

            h, w = cur_masks.shape[-2:]
            assert len(cur_boxes) == len(cur_classes)

            # It may be that we have several predicted masks for the same stuff class.
            # In the following, we track the list of masks ids for each stuff class (they are merged later on)
            cur_masks = cur_masks.flatten(1)
            stuff_equiv_classes = defaultdict(lambda: [])
            for k, label in enumerate(cur_classes):
                if not self.is_thing_map[label.item()]:
                    stuff_equiv_classes[label.item()].append(k)

            def get_ids_area(masks, scores, dedup=False):
                # This helper function creates the final panoptic segmentation image
                # It also returns the area of the masks that appears on the image

                m_id = masks.transpose(0, 1).softmax(-1)

                if m_id.shape[-1] == 0:
                    # We didn't detect any mask :(
                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
                else:
                    m_id = m_id.argmax(-1).view(h, w)

                if dedup:
                    # Merge the masks corresponding to the same stuff class
                    for equiv in stuff_equiv_classes.values():
                        if len(equiv) > 1:
                            for eq_id in equiv:
                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])

                final_h, final_w = to_tuple(target_size)

                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)

                np_seg_img = (
                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes()))
                    .view(final_h, final_w, 3)
                    .numpy()
                )
                m_id = torch.from_numpy(rgb2id(np_seg_img))

                area = []
                for i in range(len(scores)):
                    area.append(m_id.eq(i).sum().item())
                return area, seg_img

            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
            if cur_classes.numel() > 0:
                # We know filter empty masks as long as we find some
                while True:
                    filtered_small = torch.as_tensor(
                        [area[i] <= 4 for i, c in enumerate(cur_classes)],
                        dtype=torch.bool, device=keep.device
                    )
                    if filtered_small.any().item():
                        cur_scores = cur_scores[~filtered_small]
                        cur_classes = cur_classes[~filtered_small]
                        cur_masks = cur_masks[~filtered_small]
                        area, seg_img = get_ids_area(cur_masks, cur_scores)
                    else:
                        break

            else:
                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)

            segments_info = []
            for i, a in enumerate(area):
                cat = cur_classes[i].item()
                segments_info.append({"id": i, "isthing": self.is_thing_map[cat],
                                      "category_id": cat, "area": a})
            del cur_classes

            with io.BytesIO() as out:
                seg_img.save(out, format="PNG")
                predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
            preds.append(predictions)
        return preds


# --- Deformable-DETR/requirements.txt (from the same patch) ---
#   pycocotools
#   tqdm
#   cython
#   scipy
# --- patch continues with Deformable-DETR/tools/launch.py ---
# --- Deformable-DETR/tools/launch.py (reconstructed from patch hunk) ---
# --------------------------------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# --------------------------------------------------------------------------------------------------------------------------
# Modified from https://github.com/pytorch/pytorch/blob/173f224570017b4b1a3a1a13d0bff280a54d9cd9/torch/distributed/launch.py
# --------------------------------------------------------------------------------------------------------------------------

r"""
`torch.distributed.launch` is a module that spawns up multiple distributed
training processes on each of the training nodes.
The utility can be used for single-node distributed training, in which one or
more processes per node will be spawned. The utility can be used for either
CPU training or GPU training. If the utility is used for GPU training,
each distributed process will be operating on a single GPU. This can achieve
well-improved single-node training performance. It can also be used in
multi-node distributed training, by spawning up multiple processes on each node
for well-improved multi-node distributed training performance as well.
This will especially be beneficial for systems with multiple Infiniband
interfaces that have direct-GPU support, since all of them can be utilized for
aggregated communication bandwidth.
In both cases of single-node distributed training or multi-node distributed
training, this utility will launch the given number of processes per node
(``--nproc_per_node``). If used for GPU training, this number needs to be less
or equal to the number of GPUs on the current system (``nproc_per_node``),
and each process will be operating on a single GPU from *GPU 0 to
GPU (nproc_per_node - 1)*.
**How to use this module:**
1. Single-Node multi-process distributed training
::
    >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
               YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
               arguments of your training script)
2. Multi-Node multi-process distributed training: (e.g. two nodes)
Node 1: *(IP: 192.168.1.1, and has a free port: 1234)*
::
    >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
               --nnodes=2 --node_rank=0 --master_addr="192.168.1.1"
               --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
               and all other arguments of your training script)
Node 2:
::
    >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
               --nnodes=2 --node_rank=1 --master_addr="192.168.1.1"
               --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
               and all other arguments of your training script)
3. To look up what optional arguments this module offers:
::
    >>> python -m torch.distributed.launch --help
**Important Notices:**
1. This utility and multi-process distributed (single-node or
multi-node) GPU training currently only achieves the best performance using
the NCCL distributed backend. Thus NCCL backend is the recommended backend to
use for GPU training.
2. In your training program, you must parse the command-line argument:
``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by this module.
If your training program uses GPUs, you should ensure that your code only
runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by:
Parsing the local_rank argument
::
    >>> import argparse
    >>> parser = argparse.ArgumentParser()
    >>> parser.add_argument("--local_rank", type=int)
    >>> args = parser.parse_args()
Set your device to local rank using either
::
    >>> torch.cuda.set_device(arg.local_rank)  # before your code runs
or
::
    >>> with torch.cuda.device(arg.local_rank):
    >>>    # your code to run
3. In your training program, you are supposed to call the following function
at the beginning to start the distributed backend. You need to make sure that
the init_method uses ``env://``, which is the only supported ``init_method``
by this module.
::
    torch.distributed.init_process_group(backend='YOUR BACKEND',
                                         init_method='env://')
4. In your training program, you can either use regular distributed functions
or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your
training program uses GPUs for training and you would like to use
:func:`torch.nn.parallel.DistributedDataParallel` module,
here is how to configure it.
::
    model = torch.nn.parallel.DistributedDataParallel(model,
                                                      device_ids=[arg.local_rank],
                                                      output_device=arg.local_rank)
Please ensure that ``device_ids`` argument is set to be the only GPU device id
that your code will be operating on. This is generally the local rank of the
process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``,
and ``output_device`` needs to be ``args.local_rank`` in order to use this
utility
5. Another way to pass ``local_rank`` to the subprocesses via environment variable
``LOCAL_RANK``. This behavior is enabled when you launch the script with
``--use_env=True``. You must adjust the subprocess example above to replace
``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher
will not pass ``--local_rank`` when you specify this flag.
.. warning::
    ``local_rank`` is NOT globally unique: it is only unique per process
    on a machine.  Thus, don't use it to decide if you should, e.g.,
    write to a networked filesystem.  See
    https://github.com/pytorch/pytorch/issues/12042 for an example of
    how things can go wrong if you don't do this correctly.
"""


import sys
import subprocess
import os
import socket
from argparse import ArgumentParser, REMAINDER

import torch


def parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    # BUG FIX: corrected user-facing typos in the help strings
    # ("utilty" -> "utility", "communciation" -> "communication").
    parser = ArgumentParser(description="PyTorch distributed training launch "
                                        "helper utility that will spawn up "
                                        "multiple distributed processes")

    # Optional arguments for the launch helper
    parser.add_argument("--nnodes", type=int, default=1,
                        help="The number of nodes to use for distributed "
                             "training")
    parser.add_argument("--node_rank", type=int, default=0,
                        help="The rank of the node for multi-node distributed "
                             "training")
    parser.add_argument("--nproc_per_node", type=int, default=1,
                        help="The number of processes to launch on each node, "
                             "for GPU training, this is recommended to be set "
                             "to the number of GPUs in your system so that "
                             "each process can be bound to a single GPU.")
    parser.add_argument("--master_addr", default="127.0.0.1", type=str,
                        help="Master node (rank 0)'s address, should be either "
                             "the IP address or the hostname of node 0, for "
                             "single node multi-proc training, the "
                             "--master_addr can simply be 127.0.0.1")
    parser.add_argument("--master_port", default=29500, type=int,
                        help="Master node (rank 0)'s free port that needs to "
                             "be used for communication during distributed "
                             "training")

    # positional
    parser.add_argument("training_script", type=str,
                        help="The full path to the single GPU training "
                             "program/script to be launched in parallel, "
                             "followed by all the arguments for the "
                             "training script")

    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    """Spawn one training process per local rank and wait for all of them.

    Raises subprocess.CalledProcessError if any child exits non-zero
    (checked only after every child has terminated).
    """
    args = parse_args()

    # world size in terms of number of processes
    dist_world_size = args.nproc_per_node * args.nnodes

    # set PyTorch distributed related environmental variables
    current_env = os.environ.copy()
    current_env["MASTER_ADDR"] = args.master_addr
    current_env["MASTER_PORT"] = str(args.master_port)
    current_env["WORLD_SIZE"] = str(dist_world_size)

    processes = []

    for local_rank in range(0, args.nproc_per_node):
        # each process's rank
        dist_rank = args.nproc_per_node * args.node_rank + local_rank
        current_env["RANK"] = str(dist_rank)
        current_env["LOCAL_RANK"] = str(local_rank)

        cmd = [args.training_script] + args.training_script_args

        process = subprocess.Popen(cmd, env=current_env)
        processes.append(process)

    for process in processes:
        process.wait()
        if process.returncode != 0:
            raise subprocess.CalledProcessError(returncode=process.returncode,
                                                cmd=process.args)


if __name__ == "__main__":
    main()
# --- patch continues with Deformable-DETR/tools/run_dist_launch.sh ---
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------

set -x

# Usage: run_dist_launch.sh <num_gpus> <command...>
GPUS=$1
RUN_COMMAND=${@:2}
# Cap processes per node at 8; overridable via the GPUS_PER_NODE env var.
if [ $GPUS -lt 8 ]; then
    GPUS_PER_NODE=${GPUS_PER_NODE:-$GPUS}
else
    GPUS_PER_NODE=${GPUS_PER_NODE:-8}
fi
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
MASTER_PORT=${MASTER_PORT:-"29500"}
NODE_RANK=${NODE_RANK:-0}

let "NNODES=GPUS/GPUS_PER_NODE"

python ./tools/launch.py \
    --nnodes ${NNODES} \
    --node_rank ${NODE_RANK} \
    --master_addr ${MASTER_ADDR} \
    --master_port ${MASTER_PORT} \
    --nproc_per_node ${GPUS_PER_NODE} \
    ${RUN_COMMAND}
# --- patch continues with Deformable-DETR/tools/run_dist_slurm.sh ---
#!/usr/bin/env bash
# --------------------------------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# --------------------------------------------------------------------------------------------------------------------------
# Modified from https://github.com/open-mmlab/mmdetection/blob/3b53fe15d87860c6941f3dda63c0f27422da6266/tools/slurm_train.sh
# --------------------------------------------------------------------------------------------------------------------------

set -x

# Usage: run_dist_slurm.sh <partition> <job_name> <num_gpus> <command...>
PARTITION=$1
JOB_NAME=$2
GPUS=$3
RUN_COMMAND=${@:4}
# Cap tasks per node at 8; overridable via the GPUS_PER_NODE env var.
if [ $GPUS -lt 8 ]; then
    GPUS_PER_NODE=${GPUS_PER_NODE:-$GPUS}
else
    GPUS_PER_NODE=${GPUS_PER_NODE:-8}
fi
CPUS_PER_TASK=${CPUS_PER_TASK:-4}
SRUN_ARGS=${SRUN_ARGS:-""}

srun -p ${PARTITION} \
    --job-name=${JOB_NAME} \
    --gres=gpu:${GPUS_PER_NODE} \
    --ntasks=${GPUS} \
    --ntasks-per-node=${GPUS_PER_NODE} \
    --cpus-per-task=${CPUS_PER_TASK} \
    --kill-on-bad-exit=1 \
    ${SRUN_ARGS} \
    ${RUN_COMMAND}

# --- Deformable-DETR/util/__init__.py (from the same patch): license header only, no code ---
# --- patch continues with Deformable-DETR/util/box_ops.py ---
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------

"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area


def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x0, y0, x1, y1); last dim has size 4."""
    x_c, y_c, w, h = x.unbind(-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = [x_c - half_w, y_c - half_h, x_c + half_w, y_c + half_h]
    return torch.stack(corners, dim=-1)


def box_xyxy_to_cxcywh(x):
    """Convert boxes from (x0, y0, x1, y1) to (cx, cy, w, h); last dim has size 4."""
    x0, y0, x1, y1 = x.unbind(-1)
    centers_and_sizes = [(x0 + x1) / 2, (y0 + y1) / 2, x1 - x0, y1 - y0]
    return torch.stack(centers_and_sizes, dim=-1)


# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
    """Pairwise IoU for xyxy boxes; returns (iou, union), each [N, M]."""
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]

    wh = (rb - lt).clamp(min=0)  # [N,M,2]; zero when boxes don't overlap
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]

    union = area1[:, None] + area2 - inter

    return inter / union, union


def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/

    The boxes should be in [x0, y0, x1, y1] format

    Returns a [N, M] pairwise matrix, where N = len(boxes1)
    and M = len(boxes2)
    """
    # degenerate boxes gives inf / nan results
    # so do an early check
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)

    # Smallest enclosing box of each pair.
    lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])

    wh = (rb - lt).clamp(min=0)  # [N,M,2]
    area = wh[:, :, 0] * wh[:, :, 1]

    return iou - (area - union) / area


def masks_to_boxes(masks):
    """Compute the bounding boxes around the provided masks

    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.

    Returns a [N, 4] tensors, with the boxes in xyxy format
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)

    h, w = masks.shape[-2:]

    y = torch.arange(0, h, dtype=torch.float)
    x = torch.arange(0, w, dtype=torch.float)
    y, x = torch.meshgrid(y, x)

    # Max of coordinate-weighted mask gives the far edge; masking the
    # background with a huge value before min gives the near edge.
    x_mask = (masks * x.unsqueeze(0))
    x_max = x_mask.flatten(1).max(-1)[0]
    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    y_mask = (masks * y.unsqueeze(0))
    y_max = y_mask.flatten(1).max(-1)[0]
    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    return torch.stack([x_min, y_min, x_max, y_max], 1)
# --- patch continues with Deformable-DETR/util/misc.py ---
+""" +import os +import subprocess +import time +from collections import defaultdict, deque +import datetime +import pickle +from typing import Optional, List + +import torch +import torch.nn as nn +import torch.distributed as dist +from torch import Tensor + +# needed due to empty tensor bug in pytorch and torchvision 0.5 +import torchvision +if float(torchvision.__version__[:3]) < 0.5: + import math + from torchvision.ops.misc import _NewEmptyTensorOp + def _check_size_scale_factor(dim, size, scale_factor): + # type: (int, Optional[List[int]], Optional[float]) -> None + if size is None and scale_factor is None: + raise ValueError("either size or scale_factor should be defined") + if size is not None and scale_factor is not None: + raise ValueError("only one of size or scale_factor should be defined") + if not (scale_factor is not None and len(scale_factor) != dim): + raise ValueError( + "scale_factor shape must match input shape. " + "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor)) + ) + def _output_size(dim, input, size, scale_factor): + # type: (int, Tensor, Optional[List[int]], Optional[float]) -> List[int] + assert dim == 2 + _check_size_scale_factor(dim, size, scale_factor) + if size is not None: + return size + # if dim is not 2 or scale_factor is iterable use _ntuple instead of concat + assert scale_factor is not None and isinstance(scale_factor, (int, float)) + scale_factors = [scale_factor, scale_factor] + # math.floor might return float in py2.7 + return [ + int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim) + ] +elif float(torchvision.__version__[:3]) < 0.7: + from torchvision.ops import _new_empty_tensor + from torchvision.ops.misc import _output_size + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. 
+ """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! + """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + + # serialized to a Tensor + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to("cuda") + + # obtain Tensor size of each rank + local_size = torch.tensor([tensor.numel()], device="cuda") + size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + # receiving Tensor from all ranks + # we pad the tensor because 
torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) + if local_size != max_size: + padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") + tensor = torch.cat((tensor, padding), dim=0) + dist.all_gather(tensor_list, tensor) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that all processes + have the averaged results. Returns a dict with the same fields as + input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.all_reduce(values) + if average: + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in 
self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + if torch.cuda.is_available(): + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}', + 'max mem: {memory:.0f}' + ]) + else: + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ]) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +def get_sha(): + cwd = os.path.dirname(os.path.abspath(__file__)) + + def _run(command): + return subprocess.check_output(command, 
cwd=cwd).decode('ascii').strip() + sha = 'N/A' + diff = "clean" + branch = 'N/A' + try: + sha = _run(['git', 'rev-parse', 'HEAD']) + subprocess.check_output(['git', 'diff'], cwd=cwd) + diff = _run(['git', 'diff-index', 'HEAD']) + diff = "has uncommited changes" if diff else "clean" + branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) + except Exception: + pass + message = f"sha: {sha}, status: {diff}, branch: {branch}" + return message + + +def collate_fn(batch): + batch = list(zip(*batch)) + batch[0] = nested_tensor_from_tensor_list(batch[0]) + return tuple(batch) + + +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + # TODO make this more general + if tensor_list[0].ndim == 3: + # TODO make it support different-sized images + max_size = _max_by_axis([list(img.shape) for img in tensor_list]) + # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) + batch_shape = [len(tensor_list)] + max_size + b, c, h, w = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((b, h, w), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], :img.shape[2]] = False + else: + raise ValueError('not supported') + return NestedTensor(tensor, mask) + + +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device, non_blocking=False): + # type: (Device) -> NestedTensor # noqa + cast_tensor = self.tensors.to(device, non_blocking=non_blocking) + mask = self.mask + if mask is not None: + assert mask is not None 
+ cast_mask = mask.to(device, non_blocking=non_blocking) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def record_stream(self, *args, **kwargs): + self.tensors.record_stream(*args, **kwargs) + if self.mask is not None: + self.mask.record_stream(*args, **kwargs) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def get_local_size(): + if not is_dist_avail_and_initialized(): + return 1 + return int(os.environ['LOCAL_SIZE']) + + +def get_local_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return int(os.environ['LOCAL_RANK']) + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + args.dist_url = 'env://' + os.environ['LOCAL_SIZE'] = str(torch.cuda.device_count()) + elif 'SLURM_PROCID' in os.environ: + proc_id = int(os.environ['SLURM_PROCID']) + ntasks = int(os.environ['SLURM_NTASKS']) + node_list = os.environ['SLURM_NODELIST'] + num_gpus = 
torch.cuda.device_count()
+        addr = subprocess.getoutput(
+            'scontrol show hostname {} | head -n1'.format(node_list))
+        os.environ['MASTER_PORT'] = os.environ.get('MASTER_PORT', '29500')
+        os.environ['MASTER_ADDR'] = addr
+        os.environ['WORLD_SIZE'] = str(ntasks)
+        os.environ['RANK'] = str(proc_id)
+        os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
+        os.environ['LOCAL_SIZE'] = str(num_gpus)
+        args.dist_url = 'env://'
+        args.world_size = ntasks
+        args.rank = proc_id
+        args.gpu = proc_id % num_gpus
+    else:
+        print('Not using distributed mode')
+        args.distributed = False
+        return
+
+    args.distributed = True
+
+    torch.cuda.set_device(args.gpu)
+    args.dist_backend = 'nccl'
+    print('| distributed init (rank {}): {}'.format(
+        args.rank, args.dist_url), flush=True)
+    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
+                                         world_size=args.world_size, rank=args.rank)
+    torch.distributed.barrier()
+    setup_for_distributed(args.rank == 0)
+
+
+@torch.no_grad()
+def accuracy(output, target, topk=(1,)):
+    """Computes the precision@k for the specified values of k"""
+    if target.numel() == 0:
+        return [torch.zeros([], device=output.device)]
+    maxk = max(topk)
+    batch_size = target.size(0)
+
+    _, pred = output.topk(maxk, 1, True, True)
+    pred = pred.t()
+    correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+    res = []
+    for k in topk:
+        correct_k = correct[:k].view(-1).float().sum(0)
+        res.append(correct_k.mul_(100.0 / batch_size))
+    return res
+
+
+def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
+    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
+    """
+    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
+    This will eventually be supported natively by PyTorch, and this
+    function can go away.
+ """ + if float(torchvision.__version__[:3]) < 0.7: + if input.numel() > 0: + return torch.nn.functional.interpolate( + input, size, scale_factor, mode, align_corners + ) + + output_shape = _output_size(2, input, size, scale_factor) + output_shape = list(input.shape[:-2]) + list(output_shape) + if float(torchvision.__version__[:3]) < 0.5: + return _NewEmptyTensorOp.apply(input, output_shape) + return _new_empty_tensor(input, output_shape) + else: + return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) + + +def get_total_grad_norm(parameters, norm_type=2): + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = float(norm_type) + device = parameters[0].grad.device + total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), + norm_type) + return total_norm + +def inverse_sigmoid(x, eps=1e-5): + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1/x2) + diff --git a/Deformable-DETR/util/plot_utils.py b/Deformable-DETR/util/plot_utils.py new file mode 100644 index 0000000..759f34d --- /dev/null +++ b/Deformable-DETR/util/plot_utils.py @@ -0,0 +1,111 @@ +# ------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------ +# Modified from DETR (https://github.com/facebookresearch/detr) +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# ------------------------------------------------------------------------ + +""" +Plotting utilities to visualize training logs. 
+""" +import torch +import pandas as pd +import seaborn as sns +import matplotlib.pyplot as plt + +from pathlib import Path, PurePath + + +def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'): + ''' + Function to plot specific fields from training log(s). Plots both training and test results. + + :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file + - fields = which results to plot from each log file - plots both training and test for each field. + - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots + - log_name = optional, name of log file if different than default 'log.txt'. + + :: Outputs - matplotlib plots of results in fields, color coded for each log file. + - solid lines are training results, dashed lines are test results. + + ''' + func_name = "plot_utils.py::plot_logs" + + # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path, + # convert single Path to list to avoid 'not iterable' error + + if not isinstance(logs, list): + if isinstance(logs, PurePath): + logs = [logs] + print(f"{func_name} info: logs param expects a list argument, converted to list[Path].") + else: + raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \ + Expect list[Path] or single Path obj, received {type(logs)}") + + # verify valid dir(s) and that every item in list is Path object + for i, dir in enumerate(logs): + if not isinstance(dir, PurePath): + raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}") + if dir.exists(): + continue + raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}") + + # load log file(s) and plot + dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs] + + fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5)) + + for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))): + for j, field in 
enumerate(fields): + if field == 'mAP': + coco_eval = pd.DataFrame(pd.np.stack(df.test_coco_eval.dropna().values)[:, 1]).ewm(com=ewm_col).mean() + axs[j].plot(coco_eval, c=color) + else: + df.interpolate().ewm(com=ewm_col).mean().plot( + y=[f'train_{field}', f'test_{field}'], + ax=axs[j], + color=[color] * 2, + style=['-', '--'] + ) + for ax, field in zip(axs, fields): + ax.legend([Path(p).name for p in logs]) + ax.set_title(field) + + +def plot_precision_recall(files, naming_scheme='iter'): + if naming_scheme == 'exp_id': + # name becomes exp_id + names = [f.parts[-3] for f in files] + elif naming_scheme == 'iter': + names = [f.stem for f in files] + else: + raise ValueError(f'not supported {naming_scheme}') + fig, axs = plt.subplots(ncols=2, figsize=(16, 5)) + for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names): + data = torch.load(f) + # precision is n_iou, n_points, n_cat, n_area, max_det + precision = data['precision'] + recall = data['params'].recThrs + scores = data['scores'] + # take precision for all classes, all areas and 100 detections + precision = precision[0, :, :, 0, -1].mean(1) + scores = scores[0, :, :, 0, -1].mean(1) + prec = precision.mean() + rec = data['recall'][0, :, 0, -1].mean() + print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' + + f'score={scores.mean():0.3f}, ' + + f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}' + ) + axs[0].plot(recall, precision, c=color) + axs[1].plot(recall, scores, c=color) + + axs[0].set_title('Precision / Recall') + axs[0].legend(names) + axs[1].set_title('Scores / Recall') + axs[1].legend(names) + return fig, axs + + +