From a2eb45b1995f82e883b3286e3ece19fcc421a587 Mon Sep 17 00:00:00 2001 From: kkkim <314127900@qq.com> Date: Thu, 30 Nov 2017 18:05:16 +0800 Subject: [PATCH] 1.DFace star --- README.md | 137 +++++ README_en.md | 141 +++++ __init__.py | 0 anno_store/__init__.py | 0 anno_store/info | 1 + environment.yml | 66 +++ log/__init__.py | 0 log/info | 1 + model_store/__init__.py | 0 model_store/info | 1 + src/__init__.py | 0 src/config.py | 42 ++ src/core/__init__.py | 0 src/core/detect.py | 632 ++++++++++++++++++++++ src/core/image_reader.py | 171 ++++++ src/core/image_tools.py | 40 ++ src/core/imagedb.py | 162 ++++++ src/core/models.py | 207 +++++++ src/core/nms.py | 42 ++ src/core/roc.py | 2 + src/core/utils.py | 101 ++++ src/core/vision.py | 141 +++++ src/prepare_data/__init__.py | 0 src/prepare_data/assemble.py | 35 ++ src/prepare_data/assemble_onet_imglist.py | 25 + src/prepare_data/assemble_pnet_imglist.py | 25 + src/prepare_data/assemble_rnet_imglist.py | 25 + src/prepare_data/gen_Onet_train_data.py | 220 ++++++++ src/prepare_data/gen_Pnet_train_data.py | 174 ++++++ src/prepare_data/gen_Rnet_train_data.py | 219 ++++++++ src/prepare_data/gen_landmark_12.py | 156 ++++++ src/prepare_data/gen_landmark_24.py | 154 ++++++ src/prepare_data/gen_landmark_48.py | 153 ++++++ src/prepare_data/gen_landmark_net_48.py | 234 ++++++++ src/train_net/__init__.py | 0 src/train_net/train.py | 281 ++++++++++ src/train_net/train_o_net.py | 50 ++ src/train_net/train_p_net.py | 49 ++ src/train_net/train_r_net.py | 50 ++ test.jpg | Bin 0 -> 77016 bytes test_image.py | 20 + 41 files changed, 3757 insertions(+) create mode 100644 README.md create mode 100644 README_en.md create mode 100644 __init__.py create mode 100644 anno_store/__init__.py create mode 100644 anno_store/info create mode 100644 environment.yml create mode 100644 log/__init__.py create mode 100644 log/info create mode 100644 model_store/__init__.py create mode 100644 model_store/info create mode 100644 src/__init__.py create mode 
100644 src/config.py create mode 100644 src/core/__init__.py create mode 100644 src/core/detect.py create mode 100644 src/core/image_reader.py create mode 100644 src/core/image_tools.py create mode 100644 src/core/imagedb.py create mode 100644 src/core/models.py create mode 100644 src/core/nms.py create mode 100644 src/core/roc.py create mode 100644 src/core/utils.py create mode 100644 src/core/vision.py create mode 100644 src/prepare_data/__init__.py create mode 100644 src/prepare_data/assemble.py create mode 100644 src/prepare_data/assemble_onet_imglist.py create mode 100644 src/prepare_data/assemble_pnet_imglist.py create mode 100644 src/prepare_data/assemble_rnet_imglist.py create mode 100644 src/prepare_data/gen_Onet_train_data.py create mode 100644 src/prepare_data/gen_Pnet_train_data.py create mode 100644 src/prepare_data/gen_Rnet_train_data.py create mode 100644 src/prepare_data/gen_landmark_12.py create mode 100644 src/prepare_data/gen_landmark_24.py create mode 100644 src/prepare_data/gen_landmark_48.py create mode 100644 src/prepare_data/gen_landmark_net_48.py create mode 100644 src/train_net/__init__.py create mode 100644 src/train_net/train.py create mode 100644 src/train_net/train_o_net.py create mode 100644 src/train_net/train_p_net.py create mode 100644 src/train_net/train_r_net.py create mode 100644 test.jpg create mode 100644 test_image.py diff --git a/README.md b/README.md new file mode 100644 index 0000000..522aeca --- /dev/null +++ b/README.md @@ -0,0 +1,137 @@ +
+ +
+ +----------------- +# DFace • [![License](http://pic.dface.io/apache2.svg)](https://opensource.org/licenses/Apache-2.0) [![gitter](http://pic.dface.io/gitee.svg)](https://gitter.im/cmusatyalab/DFace) + + +| **`Linux CPU`** | **`Linux GPU`** | **`Mac OS CPU`** | **`Windows CPU`** | +|-----------------|---------------------|------------------|-------------------| +| [![Build Status](http://pic.dface.io/pass.svg)](http://pic.dface.io/pass.svg) | [![Build Status](http://pic.dface.io/pass.svg)](http://pic.dface.io/pass.svg) | [![Build Status](http://pic.dface.io/pass.svg)](http://pic.dface.io/pass.svg) | [![Build Status](http://pic.dface.io/pass.svg)](http://pic.dface.io/pass.svg) | + + +**基于多任务卷积网络(MTCNN)和Center-Loss的多人实时人脸检测和人脸识别系统。** + + +**DFace** 是个开源的深度学习人脸检测和人脸识别系统。所有功能都采用 **[pytorch](https://github.com/pytorch/pytorch)** 框架开发。pytorch是一个由facebook开发的深度学习框架,它包含了一些比较有趣的高级特性,例如自动求导,动态构图等。DFace天然的继承了这些优点,使得它的训练过程可以更加简单方便,并且实现的代码可以更加清晰易懂。 +DFace可以利用CUDA来支持GPU加速模式。我们建议尝试linux GPU这种模式,它几乎可以实现实时的效果。 +所有的灵感都来源于学术界最近的一些研究成果,例如 [Joint Face Detection and Alignment using Multi-task Cascaded Convolutional Networks](https://arxiv.org/abs/1604.02878) 和 [FaceNet: A Unified Embedding for Face Recognition and Clustering](https://arxiv.org/abs/1503.03832) + + +**MTCNN 结构**   + +![mtcnn](http://affluent.oss-cn-hangzhou.aliyuncs.com/html/images/mtcnn_st.png) + + +**如果你对DFace感兴趣并且想参与到这个项目中, 请查看目录下的 CONTRIBUTING.md 文档,它会实时展示一些需要@TODO的清单。我会用 [issues](https://github.com/DFace/DFace/issues) +来跟踪和反馈所有的问题.** + + +## 安装 +DFace主要有两大模块,人脸检测和人脸识别。我会提供所有模型训练和运行的详细步骤。你首先需要构建一个pytorch和cv2的python环境,我推荐使用Anaconda来设置一个独立的虚拟环境。 + + +### 依赖 +* cuda 8.0 +* anaconda +* pytorch +* torchvision +* cv2 +* matplotlib + +在这里我提供了一个anaconda的环境依赖文件environment.yml,它能方便你构建自己的虚拟环境。 + +```shell +conda env create -f path/to/environment.yml +``` + +### 人脸检测 + +如果你对mtcnn模型感兴趣,以下过程可能会帮助到你。 + +#### 训练mtcnn模型 + +MTCNN主要有三个网络,叫做**PNet**, **RNet** 和 
**ONet**。因此我们的训练过程也需要分三步先后进行。为了更好的实现效果,当前被训练的网络都将依赖于上一个训练好的网络来生成数据。所有的人脸数据集都来自 **[WIDER FACE](http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/)** 和 **[CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html)**。WIDER FACE仅提供了大量的人脸边框定位数据,而CelebA包含了人脸关键点定位数据。 + + +* 生成PNet训练数据和标注文件 + +```shell +python src/prepare_data/gen_Pnet_train_data.py --dataset_path {your dataset path} --anno_file {your dataset original annotation path} +``` +* 乱序合并标注文件 + +```shell +python src/prepare_data/assemble_pnet_imglist.py +``` + +* 训练PNet模型 + + +```shell +python src/train_net/train_p_net.py +``` +* 生成RNet训练数据和标注文件 + +```shell +python src/prepare_data/gen_Rnet_train_data.py --dataset_path {your dataset path} --anno_file {your dataset original annotation path} --pmodel_file {yout PNet model file trained before} +``` +* 乱序合并标注文件 + +```shell +python src/prepare_data/assemble_rnet_imglist.py +``` + +* 训练RNet模型 + +```shell +python src/train_net/train_r_net.py +``` + +* 生成ONet训练数据和标注文件 + +```shell +python src/prepare_data/gen_Onet_train_data.py --dataset_path {your dataset path} --anno_file {your dataset original annotation path} --pmodel_file {yout PNet model file trained before} --rmodel_file {yout RNet model file trained before} +``` + +* 生成ONet的人脸关键点训练数据和标注文件 + +```shell +python src/prepare_data/gen_landmark_48.py +``` + +* 乱序合并标注文件(包括人脸关键点) + +```shell +python src/prepare_data/assemble_onet_imglist.py +``` + +* 训练ONet模型 + +```shell +python src/train_net/train_o_net.py +``` + +#### 测试人脸检测 +```shell +python test_image.py +``` + +### 人脸识别 + +TODO + +## 测试效果 + +![mtcnn](http://affluent.oss-cn-hangzhou.aliyuncs.com/html/images/dface_demo.png) + + +## License + +[Apache License 2.0](LICENSE) + + +## Reference + +* [Seanlinx/mtcnn](https://github.com/Seanlinx/mtcnn) diff --git a/README_en.md b/README_en.md new file mode 100644 index 0000000..324a675 --- /dev/null +++ b/README_en.md @@ -0,0 +1,141 @@ +
+ +
+ +----------------- +# DFace • [![License](http://pic.dface.io/apache2.svg)](https://opensource.org/licenses/Apache-2.0) [![gitter](http://pic.dface.io/gitee.svg)](https://gitter.im/cmusatyalab/DFace) + + +| **`Linux CPU`** | **`Linux GPU`** | **`Mac OS CPU`** | **`Windows CPU`** | +|-----------------|---------------------|------------------|-------------------| +| [![Build Status](http://pic.dface.io/pass.svg)](http://pic.dface.io/pass.svg) | [![Build Status](http://pic.dface.io/pass.svg)](http://pic.dface.io/pass.svg) | [![Build Status](http://pic.dface.io/pass.svg)](http://pic.dface.io/pass.svg) | [![Build Status](http://pic.dface.io/pass.svg)](http://pic.dface.io/pass.svg) | + + +**Free and open source face detection and recognition with +deep learning. Based on the MTCNN and ResNet Center-Loss** + +[中文版 README](https://github.com/kuaikuaikim/DFace/blob/master/README_zh.md) + +**DFace** is an open source software for face detection and recognition. All features implemented by the **[pytorch](https://github.com/pytorch/pytorch)** (the facebook deeplearning framework). With PyTorch, we use a technique called reverse-mode auto-differentiation, which allows developer to change the way your network behaves arbitrarily with zero lag or overhead. +DFace inherit these advanced characteristic, that make it dynamic and ease code review. + +DFace support GPU acceleration with NVIDIA cuda. We highly recommend you use the linux GPU version.It's very fast and extremely realtime. 
+ +Our inspiration comes from several research papers on this topic, as well as current and past work such as [Joint Face Detection and Alignment using Multi-task Cascaded Convolutional Networks](https://arxiv.org/abs/1604.02878) and face recognition topic [FaceNet: A Unified Embedding for Face Recognition and Clustering](https://arxiv.org/abs/1503.03832) + +**MTCNN Structure**   + +![mtcnn](http://pic.dface.io/mtcnn.png) + +**If you want to contribute to DFace, please review the CONTRIBUTING.md in the project.We use [GitHub issues](https://github.com/DFace/DFace/issues) for +tracking requests and bugs.** + +## Installation + +DFace has two major module, detection and recognition.In these two, We provide all tutorials about how to train a model and running. +First setting a pytorch and cv2. We suggest Anaconda to make a virtual and independent python envirment. + +### Requirements +* cuda 8.0 +* anaconda +* pytorch +* torchvision +* cv2 +* matplotlib + +Also we provide a anaconda environment dependency list called environment.yml in the root path. +You can create your DFace environment very easily. +```shell +conda env create -f path/to/environment.yml +``` + +### Face Detetion + +If you are interested in how to train a mtcnn model, you can follow next step. + +#### Train mtcnn Model +MTCNN have three networks called **PNet**, **RNet** and **ONet**.So we should train it on three stage, and each stage depend on previous network which will generate train data to feed current train net, also propel the minimum loss between two networks. +Please download the train face **datasets** before your training. 
We use **[WIDER FACE](http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/)** and **[CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html)** + + +* Generate PNet Train data and annotation file + +```shell +python src/prepare_data/gen_Pnet_train_data.py --dataset_path {your dataset path} --anno_file {your dataset original annotation path} +``` +* Assemble annotation file and shuffle it + +```shell +python src/prepare_data/assemble_pnet_imglist.py +``` + +* Train PNet model + + +```shell +python src/train_net/train_p_net.py +``` +* Generate RNet Train data and annotation file + +```shell +python src/prepare_data/gen_Rnet_train_data.py --dataset_path {your dataset path} --anno_file {your dataset original annotation path} --pmodel_file {yout PNet model file trained before} +``` +* Assemble annotation file and shuffle it + +```shell +python src/prepare_data/assemble_rnet_imglist.py +``` + +* Train RNet model + +```shell +python src/train_net/train_r_net.py +``` + +* Generate ONet Train data and annotation file + +```shell +python src/prepare_data/gen_Onet_train_data.py --dataset_path {your dataset path} --anno_file {your dataset original annotation path} --pmodel_file {yout PNet model file trained before} --rmodel_file {yout RNet model file trained before} +``` + +* Generate ONet Train landmarks data + +```shell +python src/prepare_data/gen_landmark_48.py +``` + +* Assemble annotation file and shuffle it + +```shell +python src/prepare_data/assemble_onet_imglist.py +``` + +* Train ONet model + +```shell +python src/train_net/train_o_net.py +``` + +#### Test face detection +```shell +python test_image.py +``` + +### Face Recognition + +TODO + + +## Demo + +![mtcnn](http://pic.dface.io/figure_2.png) + + +## License + +[Apache License 2.0](LICENSE) + + +## Reference + +* [Seanlinx/mtcnn](https://github.com/Seanlinx/mtcnn) diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/anno_store/__init__.py b/anno_store/__init__.py new file 
mode 100644 index 0000000..e69de29 diff --git a/anno_store/info b/anno_store/info new file mode 100644 index 0000000..d1bdf95 --- /dev/null +++ b/anno_store/info @@ -0,0 +1 @@ +This directory store the annotation files of train data \ No newline at end of file diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000..fb0d782 --- /dev/null +++ b/environment.yml @@ -0,0 +1,66 @@ +name: pytorch +channels: +- soumith +- https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free +- https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ +- defaults +dependencies: +- cairo=1.14.8=0 +- certifi=2016.2.28=py27_0 +- cffi=1.10.0=py27_0 +- fontconfig=2.12.1=3 +- freetype=2.5.5=2 +- glib=2.50.2=1 +- harfbuzz=0.9.39=2 +- hdf5=1.8.17=2 +- jbig=2.1=0 +- jpeg=8d=2 +- libffi=3.2.1=1 +- libgcc=5.2.0=0 +- libiconv=1.14=0 +- libpng=1.6.30=1 +- libtiff=4.0.6=2 +- libxml2=2.9.4=0 +- mkl=2017.0.3=0 +- numpy=1.12.1=py27_0 +- olefile=0.44=py27_0 +- opencv=3.1.0=np112py27_1 +- openssl=1.0.2l=0 +- pcre=8.39=1 +- pillow=3.4.2=py27_0 +- pip=9.0.1=py27_1 +- pixman=0.34.0=0 +- pycparser=2.18=py27_0 +- python=2.7.13=0 +- readline=6.2=2 +- setuptools=36.4.0=py27_1 +- six=1.10.0=py27_0 +- sqlite=3.13.0=0 +- tk=8.5.18=0 +- wheel=0.29.0=py27_0 +- xz=5.2.3=0 +- zlib=1.2.11=0 +- cycler=0.10.0=py27_0 +- dbus=1.10.20=0 +- expat=2.1.0=0 +- functools32=3.2.3.2=py27_0 +- gst-plugins-base=1.8.0=0 +- gstreamer=1.8.0=0 +- icu=54.1=0 +- libxcb=1.12=1 +- matplotlib=2.0.2=np112py27_0 +- pycairo=1.10.0=py27_0 +- pyparsing=2.2.0=py27_0 +- pyqt=5.6.0=py27_2 +- python-dateutil=2.6.1=py27_0 +- pytz=2017.2=py27_0 +- qt=5.6.2=2 +- sip=4.18=py27_0 +- subprocess32=3.2.7=py27_0 +- cuda80=1.0=0 +- pytorch=0.2.0=py27hc03bea1_4cu80 +- torchvision=0.1.9=py27hdb88a65_1 +- pip: + - torch==0.2.0.post4 +prefix: /home/asy/.conda/envs/pytorch + diff --git a/log/__init__.py b/log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/log/info b/log/info new file mode 100644 index 0000000..1be1b7d 
--- /dev/null +++ b/log/info @@ -0,0 +1 @@ +log dir \ No newline at end of file diff --git a/model_store/__init__.py b/model_store/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/model_store/info b/model_store/info new file mode 100644 index 0000000..91cb268 --- /dev/null +++ b/model_store/info @@ -0,0 +1 @@ +This directory store trained model net parameters and structure \ No newline at end of file diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/config.py b/src/config.py new file mode 100644 index 0000000..ffc8c88 --- /dev/null +++ b/src/config.py @@ -0,0 +1,42 @@ +import os + + +MODEL_STORE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))+"/model_store" + + +ANNO_STORE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))+"/anno_store" + + +LOG_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))+"/log" + + +USE_CUDA = True + + +TRAIN_BATCH_SIZE = 512 + +TRAIN_LR = 0.01 + +END_EPOCH = 10 + + +PNET_POSTIVE_ANNO_FILENAME = "pos_12.txt" +PNET_NEGATIVE_ANNO_FILENAME = "neg_12.txt" +PNET_PART_ANNO_FILENAME = "part_12.txt" +PNET_LANDMARK_ANNO_FILENAME = "landmark_12.txt" + + +RNET_POSTIVE_ANNO_FILENAME = "pos_24.txt" +RNET_NEGATIVE_ANNO_FILENAME = "neg_24.txt" +RNET_PART_ANNO_FILENAME = "part_24.txt" +RNET_LANDMARK_ANNO_FILENAME = "landmark_24.txt" + + +ONET_POSTIVE_ANNO_FILENAME = "pos_48.txt" +ONET_NEGATIVE_ANNO_FILENAME = "neg_48.txt" +ONET_PART_ANNO_FILENAME = "part_48.txt" +ONET_LANDMARK_ANNO_FILENAME = "landmark_48.txt" + +PNET_TRAIN_IMGLIST_FILENAME = "imglist_anno_12.txt" +RNET_TRAIN_IMGLIST_FILENAME = "imglist_anno_24.txt" +ONET_TRAIN_IMGLIST_FILENAME = "imglist_anno_48.txt" \ No newline at end of file diff --git a/src/core/__init__.py b/src/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/core/detect.py b/src/core/detect.py new file mode 100644 index 0000000..4fcfbdd --- /dev/null +++ b/src/core/detect.py 
@@ -0,0 +1,632 @@ +import cv2 +import time +import numpy as np +import torch +from torch.autograd.variable import Variable +from models import PNet,RNet,ONet +import utils as utils +import image_tools + + +def create_mtcnn_net(p_model_path=None, r_model_path=None, o_model_path=None, use_cuda=True): + + pnet, rnet, onet = None, None, None + + if p_model_path is not None: + pnet = PNet(use_cuda=use_cuda) + pnet.load_state_dict(torch.load(p_model_path)) + if(use_cuda): + pnet.cuda() + pnet.eval() + + if r_model_path is not None: + rnet = RNet(use_cuda=use_cuda) + rnet.load_state_dict(torch.load(r_model_path)) + if (use_cuda): + rnet.cuda() + rnet.eval() + + if o_model_path is not None: + onet = ONet(use_cuda=use_cuda) + onet.load_state_dict(torch.load(o_model_path)) + if (use_cuda): + onet.cuda() + onet.eval() + + return pnet,rnet,onet + + + + +class MtcnnDetector(object): + """ + P,R,O net face detection and landmarks align + """ + def __init__(self, + pnet = None, + rnet = None, + onet = None, + min_face_size=12, + stride=2, + threshold=[0.6, 0.7, 0.7], + scale_factor=0.709, + ): + + self.pnet_detector = pnet + self.rnet_detector = rnet + self.onet_detector = onet + self.min_face_size = min_face_size + self.stride=stride + self.thresh = threshold + self.scale_factor = scale_factor + + + def unique_image_format(self,im): + if not isinstance(im,np.ndarray): + if im.mode == 'I': + im = np.array(im, np.int32, copy=False) + elif im.mode == 'I;16': + im = np.array(im, np.int16, copy=False) + else: + im = np.asarray(im) + return im + + def square_bbox(self, bbox): + """ + convert bbox to square + Parameters: + ---------- + bbox: numpy array , shape n x m + input bbox + Returns: + ------- + square bbox + """ + square_bbox = bbox.copy() + + h = bbox[:, 3] - bbox[:, 1] + 1 + w = bbox[:, 2] - bbox[:, 0] + 1 + l = np.maximum(h,w) + square_bbox[:, 0] = bbox[:, 0] + w*0.5 - l*0.5 + square_bbox[:, 1] = bbox[:, 1] + h*0.5 - l*0.5 + + square_bbox[:, 2] = square_bbox[:, 0] + l - 1 + 
square_bbox[:, 3] = square_bbox[:, 1] + l - 1 + return square_bbox + + + def generate_bounding_box(self, map, reg, scale, threshold): + """ + generate bbox from feature map + Parameters: + ---------- + map: numpy array , n x m x 1 + detect score for each position + reg: numpy array , n x m x 4 + bbox + scale: float number + scale of this detection + threshold: float number + detect threshold + Returns: + ------- + bbox array + """ + stride = 2 + cellsize = 12 + + t_index = np.where(map > threshold) + + # find nothing + if t_index[0].size == 0: + return np.array([]) + + dx1, dy1, dx2, dy2 = [reg[0, t_index[0], t_index[1], i] for i in range(4)] + reg = np.array([dx1, dy1, dx2, dy2]) + + # lefteye_dx, lefteye_dy, righteye_dx, righteye_dy, nose_dx, nose_dy, \ + # leftmouth_dx, leftmouth_dy, rightmouth_dx, rightmouth_dy = [landmarks[0, t_index[0], t_index[1], i] for i in range(10)] + # + # landmarks = np.array([lefteye_dx, lefteye_dy, righteye_dx, righteye_dy, nose_dx, nose_dy, leftmouth_dx, leftmouth_dy, rightmouth_dx, rightmouth_dy]) + + + + score = map[t_index[0], t_index[1], 0] + boundingbox = np.vstack([np.round((stride * t_index[1]) / scale), + np.round((stride * t_index[0]) / scale), + np.round((stride * t_index[1] + cellsize) / scale), + np.round((stride * t_index[0] + cellsize) / scale), + score, + reg, + # landmarks + ]) + + return boundingbox.T + + + def resize_image(self, img, scale): + """ + resize image and transform dimention to [batchsize, channel, height, width] + Parameters: + ---------- + img: numpy array , height x width x channel + input image, channels in BGR order here + scale: float number + scale factor of resize operation + Returns: + ------- + transformed image tensor , 1 x channel x height x width + """ + height, width, channels = img.shape + new_height = int(height * scale) # resized new height + new_width = int(width * scale) # resized new width + new_dim = (new_width, new_height) + img_resized = cv2.resize(img, new_dim, 
interpolation=cv2.INTER_LINEAR) # resized image + return img_resized + + + def pad(self, bboxes, w, h): + """ + pad the the boxes + Parameters: + ---------- + bboxes: numpy array, n x 5 + input bboxes + w: float number + width of the input image + h: float number + height of the input image + Returns : + ------ + dy, dx : numpy array, n x 1 + start point of the bbox in target image + edy, edx : numpy array, n x 1 + end point of the bbox in target image + y, x : numpy array, n x 1 + start point of the bbox in original image + ex, ex : numpy array, n x 1 + end point of the bbox in original image + tmph, tmpw: numpy array, n x 1 + height and width of the bbox + """ + + tmpw = (bboxes[:, 2] - bboxes[:, 0] + 1).astype(np.int32) + tmph = (bboxes[:, 3] - bboxes[:, 1] + 1).astype(np.int32) + numbox = bboxes.shape[0] + + dx = np.zeros((numbox, )) + dy = np.zeros((numbox, )) + edx, edy = tmpw.copy()-1, tmph.copy()-1 + + x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3] + + tmp_index = np.where(ex > w-1) + edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index] + ex[tmp_index] = w - 1 + + tmp_index = np.where(ey > h-1) + edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index] + ey[tmp_index] = h - 1 + + tmp_index = np.where(x < 0) + dx[tmp_index] = 0 - x[tmp_index] + x[tmp_index] = 0 + + tmp_index = np.where(y < 0) + dy[tmp_index] = 0 - y[tmp_index] + y[tmp_index] = 0 + + return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] + return_list = [item.astype(np.int32) for item in return_list] + + return return_list + + + def detect_pnet(self, im): + """Get face candidates through pnet + + Parameters: + ---------- + im: numpy array + input image array + + Returns: + ------- + boxes: numpy array + detected boxes before calibration + boxes_align: numpy array + boxes after calibration + """ + + # im = self.unique_image_format(im) + + h, w, c = im.shape + net_size = 12 + + current_scale = float(net_size) / self.min_face_size # find initial scale + im_resized 
= self.resize_image(im, current_scale) + current_height, current_width, _ = im_resized.shape + + # fcn + all_boxes = list() + while min(current_height, current_width) > net_size: + feed_imgs = [] + image_tensor = image_tools.convert_image_to_tensor(im_resized) + feed_imgs.append(image_tensor) + feed_imgs = torch.stack(feed_imgs) + feed_imgs = Variable(feed_imgs) + + + if self.pnet_detector.use_cuda: + feed_imgs = feed_imgs.cuda() + + cls_map, reg = self.pnet_detector(feed_imgs) + + cls_map_np = image_tools.convert_chwTensor_to_hwcNumpy(cls_map.cpu()) + reg_np = image_tools.convert_chwTensor_to_hwcNumpy(reg.cpu()) + # landmark_np = image_tools.convert_chwTensor_to_hwcNumpy(landmark.cpu()) + + + boxes = self.generate_bounding_box(cls_map_np[ 0, :, :], reg_np, current_scale, self.thresh[0]) + + current_scale *= self.scale_factor + im_resized = self.resize_image(im, current_scale) + current_height, current_width, _ = im_resized.shape + + if boxes.size == 0: + continue + keep = utils.nms(boxes[:, :5], 0.5, 'Union') + boxes = boxes[keep] + all_boxes.append(boxes) + + if len(all_boxes) == 0: + return None, None + + all_boxes = np.vstack(all_boxes) + + # merge the detection from first stage + keep = utils.nms(all_boxes[:, 0:5], 0.7, 'Union') + all_boxes = all_boxes[keep] + # boxes = all_boxes[:, :5] + + bw = all_boxes[:, 2] - all_boxes[:, 0] + 1 + bh = all_boxes[:, 3] - all_boxes[:, 1] + 1 + + # landmark_keep = all_boxes[:, 9:].reshape((5,2)) + + + boxes = np.vstack([all_boxes[:,0], + all_boxes[:,1], + all_boxes[:,2], + all_boxes[:,3], + all_boxes[:,4], + # all_boxes[:, 0] + all_boxes[:, 9] * bw, + # all_boxes[:, 1] + all_boxes[:,10] * bh, + # all_boxes[:, 0] + all_boxes[:, 11] * bw, + # all_boxes[:, 1] + all_boxes[:, 12] * bh, + # all_boxes[:, 0] + all_boxes[:, 13] * bw, + # all_boxes[:, 1] + all_boxes[:, 14] * bh, + # all_boxes[:, 0] + all_boxes[:, 15] * bw, + # all_boxes[:, 1] + all_boxes[:, 16] * bh, + # all_boxes[:, 0] + all_boxes[:, 17] * bw, + # all_boxes[:, 1] + 
all_boxes[:, 18] * bh + ]) + + boxes = boxes.T + + align_topx = all_boxes[:, 0] + all_boxes[:, 5] * bw + align_topy = all_boxes[:, 1] + all_boxes[:, 6] * bh + align_bottomx = all_boxes[:, 2] + all_boxes[:, 7] * bw + align_bottomy = all_boxes[:, 3] + all_boxes[:, 8] * bh + + # refine the boxes + boxes_align = np.vstack([ align_topx, + align_topy, + align_bottomx, + align_bottomy, + all_boxes[:, 4], + # align_topx + all_boxes[:,9] * bw, + # align_topy + all_boxes[:,10] * bh, + # align_topx + all_boxes[:,11] * bw, + # align_topy + all_boxes[:,12] * bh, + # align_topx + all_boxes[:,13] * bw, + # align_topy + all_boxes[:,14] * bh, + # align_topx + all_boxes[:,15] * bw, + # align_topy + all_boxes[:,16] * bh, + # align_topx + all_boxes[:,17] * bw, + # align_topy + all_boxes[:,18] * bh, + ]) + boxes_align = boxes_align.T + + return boxes, boxes_align + + def detect_rnet(self, im, dets): + """Get face candidates using rnet + + Parameters: + ---------- + im: numpy array + input image array + dets: numpy array + detection results of pnet + + Returns: + ------- + boxes: numpy array + detected boxes before calibration + boxes_align: numpy array + boxes after calibration + """ + h, w, c = im.shape + + if dets is None: + return None,None + + dets = self.square_bbox(dets) + dets[:, 0:4] = np.round(dets[:, 0:4]) + + [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h) + num_boxes = dets.shape[0] + + ''' + # helper for setting RNet batch size + batch_size = self.rnet_detector.batch_size + ratio = float(num_boxes) / batch_size + if ratio > 3 or ratio < 0.3: + print "You may need to reset RNet batch size if this info appears frequently, \ +face candidates:%d, current batch_size:%d"%(num_boxes, batch_size) + ''' + + # cropped_ims_tensors = np.zeros((num_boxes, 3, 24, 24), dtype=np.float32) + cropped_ims_tensors = [] + for i in range(num_boxes): + tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8) + tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = im[y[i]:ey[i]+1, 
x[i]:ex[i]+1, :] + crop_im = cv2.resize(tmp, (24, 24)) + crop_im_tensor = image_tools.convert_image_to_tensor(crop_im) + # cropped_ims_tensors[i, :, :, :] = crop_im_tensor + cropped_ims_tensors.append(crop_im_tensor) + feed_imgs = Variable(torch.stack(cropped_ims_tensors)) + + if self.rnet_detector.use_cuda: + feed_imgs = feed_imgs.cuda() + + cls_map, reg = self.rnet_detector(feed_imgs) + + cls_map = cls_map.cpu().data.numpy() + reg = reg.cpu().data.numpy() + # landmark = landmark.cpu().data.numpy() + + + keep_inds = np.where(cls_map > self.thresh[1])[0] + + if len(keep_inds) > 0: + boxes = dets[keep_inds] + cls = cls_map[keep_inds] + reg = reg[keep_inds] + # landmark = landmark[keep_inds] + else: + return None, None + + keep = utils.nms(boxes, 0.7) + + if len(keep) == 0: + return None, None + + keep_cls = cls[keep] + keep_boxes = boxes[keep] + keep_reg = reg[keep] + # keep_landmark = landmark[keep] + + + bw = keep_boxes[:, 2] - keep_boxes[:, 0] + 1 + bh = keep_boxes[:, 3] - keep_boxes[:, 1] + 1 + + + boxes = np.vstack([ keep_boxes[:,0], + keep_boxes[:,1], + keep_boxes[:,2], + keep_boxes[:,3], + keep_cls[:,0], + # keep_boxes[:,0] + keep_landmark[:, 0] * bw, + # keep_boxes[:,1] + keep_landmark[:, 1] * bh, + # keep_boxes[:,0] + keep_landmark[:, 2] * bw, + # keep_boxes[:,1] + keep_landmark[:, 3] * bh, + # keep_boxes[:,0] + keep_landmark[:, 4] * bw, + # keep_boxes[:,1] + keep_landmark[:, 5] * bh, + # keep_boxes[:,0] + keep_landmark[:, 6] * bw, + # keep_boxes[:,1] + keep_landmark[:, 7] * bh, + # keep_boxes[:,0] + keep_landmark[:, 8] * bw, + # keep_boxes[:,1] + keep_landmark[:, 9] * bh, + ]) + + align_topx = keep_boxes[:,0] + keep_reg[:,0] * bw + align_topy = keep_boxes[:,1] + keep_reg[:,1] * bh + align_bottomx = keep_boxes[:,2] + keep_reg[:,2] * bw + align_bottomy = keep_boxes[:,3] + keep_reg[:,3] * bh + + boxes_align = np.vstack([align_topx, + align_topy, + align_bottomx, + align_bottomy, + keep_cls[:, 0], + # align_topx + keep_landmark[:, 0] * bw, + # align_topy + 
keep_landmark[:, 1] * bh, + # align_topx + keep_landmark[:, 2] * bw, + # align_topy + keep_landmark[:, 3] * bh, + # align_topx + keep_landmark[:, 4] * bw, + # align_topy + keep_landmark[:, 5] * bh, + # align_topx + keep_landmark[:, 6] * bw, + # align_topy + keep_landmark[:, 7] * bh, + # align_topx + keep_landmark[:, 8] * bw, + # align_topy + keep_landmark[:, 9] * bh, + ]) + + boxes = boxes.T + boxes_align = boxes_align.T + + return boxes, boxes_align + + def detect_onet(self, im, dets): + """Get face candidates using onet + + Parameters: + ---------- + im: numpy array + input image array + dets: numpy array + detection results of rnet + + Returns: + ------- + boxes_align: numpy array + boxes after calibration + landmarks_align: numpy array + landmarks after calibration + + """ + h, w, c = im.shape + + if dets is None: + return None, None + + dets = self.square_bbox(dets) + dets[:, 0:4] = np.round(dets[:, 0:4]) + + [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h) + num_boxes = dets.shape[0] + + + # cropped_ims_tensors = np.zeros((num_boxes, 3, 24, 24), dtype=np.float32) + cropped_ims_tensors = [] + for i in range(num_boxes): + tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8) + tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :] + crop_im = cv2.resize(tmp, (48, 48)) + crop_im_tensor = image_tools.convert_image_to_tensor(crop_im) + # cropped_ims_tensors[i, :, :, :] = crop_im_tensor + cropped_ims_tensors.append(crop_im_tensor) + feed_imgs = Variable(torch.stack(cropped_ims_tensors)) + + if self.rnet_detector.use_cuda: + feed_imgs = feed_imgs.cuda() + + cls_map, reg, landmark = self.onet_detector(feed_imgs) + + cls_map = cls_map.cpu().data.numpy() + reg = reg.cpu().data.numpy() + landmark = landmark.cpu().data.numpy() + + keep_inds = np.where(cls_map > self.thresh[2])[0] + + if len(keep_inds) > 0: + boxes = dets[keep_inds] + cls = cls_map[keep_inds] + reg = reg[keep_inds] + landmark = landmark[keep_inds] + else: + 
def detect_face(self, img):
    """Run the full PNet -> RNet -> ONet cascade on one image.

    Parameters:
    ----------
    img: numpy array, h * w * c
        input image (BGR, as loaded by cv2 elsewhere in this module —
        TODO confirm against callers)

    Returns:
    -------
    boxes_align: numpy array, n x 5
        calibrated boxes [x1, y1, x2, y2, score]; empty array when no face
    landmark_align: numpy array, n x 10
        calibrated 5-point landmarks; empty array when no face
    """
    boxes_align = np.array([])
    landmark_align = np.array([])

    # Pre-initialise stage timings so the summary print below is safe
    # even when a stage detector is disabled (the original raised
    # NameError on the missing t1/t2/t3 in that case).
    t1 = t2 = t3 = 0.0
    t = time.time()

    # stage 1: proposal network
    if self.pnet_detector:
        boxes, boxes_align = self.detect_pnet(img)
        if boxes_align is None:
            return np.array([]), np.array([])
        t1 = time.time() - t
        t = time.time()

    # stage 2: refinement network
    if self.rnet_detector:
        boxes, boxes_align = self.detect_rnet(img, boxes_align)
        if boxes_align is None:
            return np.array([]), np.array([])
        t2 = time.time() - t
        t = time.time()

    # stage 3: output network (also regresses the landmarks)
    if self.onet_detector:
        boxes_align, landmark_align = self.detect_onet(img, boxes_align)
        if boxes_align is None:
            return np.array([]), np.array([])
        t3 = time.time() - t

    # single-argument print form works under both Python 2 and 3
    print("time cost " + '{:.3f}'.format(t1 + t2 + t3) +
          ' pnet {:.3f} rnet {:.3f} onet {:.3f}'.format(t1, t2, t3))

    return boxes_align, landmark_align
self.cur + cur_to = min(cur_from + self.batch_size, self.size) + imdb = [self.imdb[self.index[i]] for i in range(cur_from, cur_to)] + data, label = get_minibatch(imdb) + self.data = data['data'] + self.label = [label[name] for name in self.label_names] + + + +class TestImageLoader: + def __init__(self, imdb, batch_size=1, shuffle=False): + self.imdb = imdb + self.batch_size = batch_size + self.shuffle = shuffle + self.size = len(imdb) + self.index = np.arange(self.size) + + self.cur = 0 + self.data = None + self.label = None + + self.reset() + self.get_batch() + + def reset(self): + self.cur = 0 + if self.shuffle: + np.random.shuffle(self.index) + + def iter_next(self): + return self.cur + self.batch_size <= self.size + + def __iter__(self): + return self + + def __next__(self): + return self.next() + + def next(self): + if self.iter_next(): + self.get_batch() + self.cur += self.batch_size + return self.data + else: + raise StopIteration + + def getindex(self): + return self.cur / self.batch_size + + def getpad(self): + if self.cur + self.batch_size > self.size: + return self.cur + self.batch_size - self.size + else: + return 0 + + def get_batch(self): + cur_from = self.cur + cur_to = min(cur_from + self.batch_size, self.size) + imdb = [self.imdb[self.index[i]] for i in range(cur_from, cur_to)] + data= get_testbatch(imdb) + self.data=data['data'] + + + + +def get_minibatch(imdb): + + # im_size: 12, 24 or 48 + num_images = len(imdb) + processed_ims = list() + cls_label = list() + bbox_reg_target = list() + landmark_reg_target = list() + + for i in range(num_images): + im = cv2.imread(imdb[i]['image']) + #im = Image.open(imdb[i]['image']) + + if imdb[i]['flipped']: + im = im[:, ::-1, :] + #im = im.transpose(Image.FLIP_LEFT_RIGHT) + + cls = imdb[i]['label'] + bbox_target = imdb[i]['bbox_target'] + landmark = imdb[i]['landmark_target'] + + processed_ims.append(im) + cls_label.append(cls) + bbox_reg_target.append(bbox_target) + landmark_reg_target.append(landmark) + + 
im_array = np.asarray(processed_ims) + + label_array = np.array(cls_label) + + bbox_target_array = np.vstack(bbox_reg_target) + + landmark_target_array = np.vstack(landmark_reg_target) + + data = {'data': im_array} + label = {'label': label_array, + 'bbox_target': bbox_target_array, + 'landmark_target': landmark_target_array + } + + return data, label + + +def get_testbatch(imdb): + assert len(imdb) == 1, "Single batch only" + im = cv2.imread(imdb[0]['image']) + data = {'data': im} + return data \ No newline at end of file diff --git a/src/core/image_tools.py b/src/core/image_tools.py new file mode 100644 index 0000000..ef6704d --- /dev/null +++ b/src/core/image_tools.py @@ -0,0 +1,40 @@ +import torchvision.transforms as transforms +import torch +from torch.autograd.variable import Variable +import numpy as np + +transform = transforms.ToTensor() + +def convert_image_to_tensor(image): + """convert an image to pytorch tensor + + Parameters: + ---------- + image: numpy array , h * w * c + + Returns: + ------- + image_tensor: pytorch.FloatTensor, c * h * w + """ + image = image.astype(np.float) + return transform(image) + # return transform(image) + + +def convert_chwTensor_to_hwcNumpy(tensor): + """convert a group images pytorch tensor(count * c * h * w) to numpy array images(count * h * w * c) + Parameters: + ---------- + tensor: numpy array , count * c * h * w + + Returns: + ------- + numpy array images: count * h * w * c + """ + + if isinstance(tensor, Variable): + return np.transpose(tensor.data.numpy(), (0,2,3,1)) + elif isinstance(tensor, torch.FloatTensor): + return np.transpose(tensor.numpy(), (0,2,3,1)) + else: + raise Exception("covert b*c*h*w tensor to b*h*w*c numpy error.This tensor must have 4 dimension.") \ No newline at end of file diff --git a/src/core/imagedb.py b/src/core/imagedb.py new file mode 100644 index 0000000..be14ac2 --- /dev/null +++ b/src/core/imagedb.py @@ -0,0 +1,162 @@ +import os +import numpy as np + +class ImageDB(object): + def 
def load_annotations(self, annotion_type=1):
    """Load image annotations into an image database.

    Each annotation line is:
        <image path> <label> [<4 bbox offsets> | <4 bbox offsets> <10 landmark values>]

    Parameters:
    ----------
    annotion_type: int
        kept for interface compatibility; currently unused (the original
        docstring only contained placeholder text here)
    Returns:
    -------
    imdb: list of dict
        one entry per image with key 'image', plus in train mode
        'label', 'flipped', 'bbox_target' and 'landmark_target'
    """
    assert os.path.exists(self.image_annotation_file), \
        'annotations not found at {}'.format(self.image_annotation_file)
    with open(self.image_annotation_file, 'r') as f:
        annotations = f.readlines()

    imdb = []
    for i in range(self.num_images):
        annotation = annotations[i].strip().split(' ')
        index = annotation[0]
        im_path = self.real_image_path(index)
        imdb_ = dict()
        imdb_['image'] = im_path

        if self.mode == 'test':
            # test mode keeps only the image path
            pass
        else:
            label = annotation[1]
            imdb_['label'] = int(label)
            imdb_['flipped'] = False
            imdb_['bbox_target'] = np.zeros((4,))
            imdb_['landmark_target'] = np.zeros((10,))
            # 4 extra fields -> bbox offsets only
            if len(annotation[2:]) == 4:
                bbox_target = annotation[2:6]
                imdb_['bbox_target'] = np.array(bbox_target).astype(float)
            # 14 extra fields -> bbox offsets + 10 landmark values
            if len(annotation[2:]) == 14:
                bbox_target = annotation[2:6]
                imdb_['bbox_target'] = np.array(bbox_target).astype(float)
                landmark = annotation[6:]
                imdb_['landmark_target'] = np.array(landmark).astype(float)
        imdb.append(imdb_)
    return imdb
landmark_.reshape((10)), + 'flipped': True} + + imdb.append(item) + self.image_set_index *= 2 + return imdb + + diff --git a/src/core/models.py b/src/core/models.py new file mode 100644 index 0000000..a11f485 --- /dev/null +++ b/src/core/models.py @@ -0,0 +1,207 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def weights_init(m): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + nn.init.xavier_uniform(m.weight.data) + nn.init.constant(m.bias, 0.1) + + + +class LossFn: + def __init__(self, cls_factor=1, box_factor=1, landmark_factor=1): + # loss function + self.cls_factor = cls_factor + self.box_factor = box_factor + self.land_factor = landmark_factor + self.loss_cls = nn.BCELoss() + self.loss_box = nn.MSELoss() + self.loss_landmark = nn.MSELoss() + + + def cls_loss(self,gt_label,pred_label): + pred_label = torch.squeeze(pred_label) + gt_label = torch.squeeze(gt_label) + # get the mask element which >= 0, only 0 and 1 can effect the detection loss + mask = torch.ge(gt_label,0) + valid_gt_label = torch.masked_select(gt_label,mask) + valid_pred_label = torch.masked_select(pred_label,mask) + return self.loss_cls(valid_pred_label,valid_gt_label)*self.cls_factor + + + def box_loss(self,gt_label,gt_offset,pred_offset): + pred_offset = torch.squeeze(pred_offset) + gt_offset = torch.squeeze(gt_offset) + gt_label = torch.squeeze(gt_label) + + #get the mask element which != 0 + unmask = torch.eq(gt_label,0) + mask = torch.eq(unmask,0) + #convert mask to dim index + chose_index = torch.nonzero(mask.data) + chose_index = torch.squeeze(chose_index) + #only valid element can effect the loss + valid_gt_offset = gt_offset[chose_index,:] + valid_pred_offset = pred_offset[chose_index,:] + return self.loss_box(valid_pred_offset,valid_gt_offset)*self.box_factor + + + def landmark_loss(self,gt_label,gt_landmark,pred_landmark): + pred_landmark = torch.squeeze(pred_landmark) + gt_landmark = torch.squeeze(gt_landmark) + gt_label = 
torch.squeeze(gt_label) + mask = torch.eq(gt_label,-2) + + chose_index = torch.nonzero(mask.data) + chose_index = torch.squeeze(chose_index) + + valid_gt_landmark = gt_landmark[chose_index, :] + valid_pred_landmark = pred_landmark[chose_index, :] + return self.loss_landmark(valid_pred_landmark,valid_gt_landmark)*self.land_factor + + + + + +class PNet(nn.Module): + ''' PNet ''' + + def __init__(self, is_train=False, use_cuda=True): + super(PNet, self).__init__() + self.is_train = is_train + self.use_cuda = use_cuda + + # backend + self.pre_layer = nn.Sequential( + nn.Conv2d(3, 10, kernel_size=3, stride=1), # conv1 + nn.PReLU(), # PReLU1 + nn.MaxPool2d(kernel_size=2, stride=2), # pool1 + nn.Conv2d(10, 16, kernel_size=3, stride=1), # conv2 + nn.PReLU(), # PReLU2 + nn.Conv2d(16, 32, kernel_size=3, stride=1), # conv3 + nn.PReLU() # PReLU3 + ) + # detection + self.conv4_1 = nn.Conv2d(32, 1, kernel_size=1, stride=1) + # bounding box regresion + self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1, stride=1) + # landmark localization + self.conv4_3 = nn.Conv2d(32, 10, kernel_size=1, stride=1) + + # weight initiation with xavier + self.apply(weights_init) + + def forward(self, x): + x = self.pre_layer(x) + label = F.sigmoid(self.conv4_1(x)) + offset = self.conv4_2(x) + # landmark = self.conv4_3(x) + + if self.is_train is True: + # label_loss = LossUtil.label_loss(self.gt_label,torch.squeeze(label)) + # bbox_loss = LossUtil.bbox_loss(self.gt_bbox,torch.squeeze(offset)) + return label,offset + #landmark = self.conv4_3(x) + return label, offset + + + + + +class RNet(nn.Module): + ''' RNet ''' + + def __init__(self,is_train=False, use_cuda=True): + super(RNet, self).__init__() + self.is_train = is_train + self.use_cuda = use_cuda + # backend + self.pre_layer = nn.Sequential( + nn.Conv2d(3, 28, kernel_size=3, stride=1), # conv1 + nn.PReLU(), # prelu1 + nn.MaxPool2d(kernel_size=3, stride=2), # pool1 + nn.Conv2d(28, 48, kernel_size=3, stride=1), # conv2 + nn.PReLU(), # prelu2 + 
class ONet(nn.Module):
    """ONet: output network of the MTCNN cascade.

    (The original docstring wrongly said "RNet".)  Produces the final
    face score, bounding-box regression and 5-point landmark regression.
    """

    def __init__(self, is_train=False, use_cuda=True):
        super(ONet, self).__init__()
        self.is_train = is_train
        self.use_cuda = use_cuda
        # backend
        self.pre_layer = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, stride=1),    # conv1
            nn.PReLU(),                                   # prelu1
            nn.MaxPool2d(kernel_size=3, stride=2),        # pool1
            nn.Conv2d(32, 64, kernel_size=3, stride=1),   # conv2
            nn.PReLU(),                                   # prelu2
            nn.MaxPool2d(kernel_size=3, stride=2),        # pool2
            nn.Conv2d(64, 64, kernel_size=3, stride=1),   # conv3
            nn.PReLU(),                                   # prelu3
            nn.MaxPool2d(kernel_size=2, stride=2),        # pool3
            nn.Conv2d(64, 128, kernel_size=2, stride=1),  # conv4
            nn.PReLU()                                    # prelu4
        )
        self.conv5 = nn.Linear(128 * 2 * 2, 256)          # conv5
        self.prelu5 = nn.PReLU()                          # prelu5
        # detection
        self.conv6_1 = nn.Linear(256, 1)
        # bounding box regression
        self.conv6_2 = nn.Linear(256, 4)
        # landmark localization
        self.conv6_3 = nn.Linear(256, 10)
        # weight initiation with xavier
        self.apply(weights_init)

    def forward(self, x):
        # backend
        x = self.pre_layer(x)
        x = x.view(x.size(0), -1)
        x = self.conv5(x)
        x = self.prelu5(x)
        # heads
        det = torch.sigmoid(self.conv6_1(x))
        box = self.conv6_2(x)
        landmark = self.conv6_3(x)
        # training and inference return the same triple; the original's
        # duplicated branches are merged
        return det, box, landmark
(box[3] - box[1] + 1) + area = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1) + xx1 = np.maximum(box[0], boxes[:, 0]) + yy1 = np.maximum(box[1], boxes[:, 1]) + xx2 = np.minimum(box[2], boxes[:, 2]) + yy2 = np.minimum(box[3], boxes[:, 3]) + + # compute the width and height of the bounding box + w = np.maximum(0, xx2 - xx1 + 1) + h = np.maximum(0, yy2 - yy1 + 1) + + inter = w * h + ovr = np.true_divide(inter,(box_area + area - inter)) + #ovr = inter / (box_area + area - inter) + return ovr + + +def convert_to_square(bbox): + """Convert bbox to square + + Parameters: + ---------- + bbox: numpy array , shape n x 5 + input bbox + + Returns: + ------- + square bbox + """ + square_bbox = bbox.copy() + + h = bbox[:, 3] - bbox[:, 1] + 1 + w = bbox[:, 2] - bbox[:, 0] + 1 + max_side = np.maximum(h,w) + square_bbox[:, 0] = bbox[:, 0] + w*0.5 - max_side*0.5 + square_bbox[:, 1] = bbox[:, 1] + h*0.5 - max_side*0.5 + square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1 + square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1 + return square_bbox + + +def nms(dets, thresh, mode="Union"): + """ + greedily select boxes with high confidence + keep boxes overlap <= thresh + rule out overlap > thresh + :param dets: [[x1, y1, x2, y2 score]] + :param thresh: retain overlap <= thresh + :return: indexes to keep + """ + x1 = dets[:, 0] + y1 = dets[:, 1] + x2 = dets[:, 2] + y2 = dets[:, 3] + scores = dets[:, 4] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + if mode == "Union": + ovr = inter / (areas[i] + areas[order[1:]] - inter) + elif mode == "Minimum": + ovr = inter / np.minimum(areas[i], areas[order[1:]]) + + inds = 
def vis_two(im_array, dets1, dets2, thresh=0.9):
    """Visualize detection results before and after calibration side by side.

    Parameters:
    ----------
    im_array: numpy.ndarray
        test image in rgb
    dets1: numpy.ndarray([[x1 y1 x2 y2 score landmark_x1 landmark_y1 ...]])
        detection results before calibration (left panel)
    dets2: numpy.ndarray (same layout as dets1)
        detection results after calibration (right panel)
    thresh: float
        only boxes with score > thresh are drawn

    Returns:
    -------
    """
    import matplotlib.pyplot as plt

    def _draw(dets):
        # draw box + 5-point landmarks for every confident detection
        for i in range(dets.shape[0]):
            bbox = dets[i, :4]
            score = dets[i, 4]
            landmarks = dets[i, 5:]
            if score > thresh:
                rect = plt.Rectangle((bbox[0], bbox[1]),
                                     bbox[2] - bbox[0],
                                     bbox[3] - bbox[1], fill=False,
                                     edgecolor='red', linewidth=0.7)
                plt.gca().add_patch(rect)
                pts = landmarks.reshape((5, 2))
                for j in range(5):
                    plt.scatter(pts[j, 0], pts[j, 1], c='yellow',
                                linewidths=0.1, marker='x', s=5)

    plt.figure()
    plt.subplot(121)
    plt.imshow(im_array)
    _draw(dets1)

    plt.subplot(122)
    plt.imshow(im_array)
    # BUG FIX: the original drew dets1's landmarks on the dets2 panel
    # (it indexed `dets1[i, 5:]` inside the dets2 loop)
    _draw(dets2)

    plt.show()
plt.Rectangle((bbox[0], bbox[1]), + # bbox[2] - bbox[0], + # bbox[3] - bbox[1], fill=False, + # edgecolor=color, linewidth=0.5) + # plt.gca().add_patch(rect) + + pylab.show() \ No newline at end of file diff --git a/src/prepare_data/__init__.py b/src/prepare_data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/prepare_data/assemble.py b/src/prepare_data/assemble.py new file mode 100644 index 0000000..bc92093 --- /dev/null +++ b/src/prepare_data/assemble.py @@ -0,0 +1,35 @@ + +import os +import numpy.random as npr +import numpy as np + +def assemble_data(output_file, anno_file_list=[]): + #assemble the annotations to one file + size = 12 + + if len(anno_file_list)==0: + return 0 + + if os.path.exists(output_file): + os.remove(output_file) + + for anno_file in anno_file_list: + with open(anno_file, 'r') as f: + anno_lines = f.readlines() + + base_num = 250000 + + if len(anno_lines) > base_num * 3: + idx_keep = npr.choice(len(anno_lines), size=base_num * 3, replace=True) + elif len(anno_lines) > 100000: + idx_keep = npr.choice(len(anno_lines), size=len(anno_lines), replace=True) + else: + idx_keep = np.arange(len(anno_lines)) + np.random.shuffle(idx_keep) + chose_count = 0 + with open(output_file, 'a+') as f: + for idx in idx_keep: + f.write(anno_lines[idx]) + chose_count+=1 + + return chose_count \ No newline at end of file diff --git a/src/prepare_data/assemble_onet_imglist.py b/src/prepare_data/assemble_onet_imglist.py new file mode 100644 index 0000000..1574c51 --- /dev/null +++ b/src/prepare_data/assemble_onet_imglist.py @@ -0,0 +1,25 @@ +import os +import config +import assemble as assemble + + +if __name__ == '__main__': + + anno_list = [] + + net_landmark_file = os.path.join(config.ANNO_STORE_DIR,config.ONET_LANDMARK_ANNO_FILENAME) + net_postive_file = os.path.join(config.ANNO_STORE_DIR,config.ONET_POSTIVE_ANNO_FILENAME) + net_part_file = os.path.join(config.ANNO_STORE_DIR,config.ONET_PART_ANNO_FILENAME) + net_neg_file = 
os.path.join(config.ANNO_STORE_DIR,config.ONET_NEGATIVE_ANNO_FILENAME) + + anno_list.append(net_postive_file) + anno_list.append(net_part_file) + anno_list.append(net_neg_file) + anno_list.append(net_landmark_file) + + imglist_filename = config.ONET_TRAIN_IMGLIST_FILENAME + anno_dir = config.ANNO_STORE_DIR + imglist_file = os.path.join(anno_dir, imglist_filename) + + chose_count = assemble.assemble_data(imglist_file ,anno_list) + print "PNet train annotation result file path:%s" % imglist_file diff --git a/src/prepare_data/assemble_pnet_imglist.py b/src/prepare_data/assemble_pnet_imglist.py new file mode 100644 index 0000000..1a3aaf6 --- /dev/null +++ b/src/prepare_data/assemble_pnet_imglist.py @@ -0,0 +1,25 @@ +import os +import config +import assemble as assemble + + +if __name__ == '__main__': + + anno_list = [] + + # pnet_landmark_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_LANDMARK_ANNO_FILENAME) + pnet_postive_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_POSTIVE_ANNO_FILENAME) + pnet_part_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_PART_ANNO_FILENAME) + pnet_neg_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_NEGATIVE_ANNO_FILENAME) + + anno_list.append(pnet_postive_file) + anno_list.append(pnet_part_file) + anno_list.append(pnet_neg_file) + # anno_list.append(pnet_landmark_file) + + imglist_filename = config.PNET_TRAIN_IMGLIST_FILENAME + anno_dir = config.ANNO_STORE_DIR + imglist_file = os.path.join(anno_dir, imglist_filename) + + chose_count = assemble.assemble_data(imglist_file ,anno_list) + print "PNet train annotation result file path:%s" % imglist_file diff --git a/src/prepare_data/assemble_rnet_imglist.py b/src/prepare_data/assemble_rnet_imglist.py new file mode 100644 index 0000000..1ea7a94 --- /dev/null +++ b/src/prepare_data/assemble_rnet_imglist.py @@ -0,0 +1,25 @@ +import os +import config +import assemble as assemble + + +if __name__ == '__main__': + + anno_list = [] + + # pnet_landmark_file = 
def gen_onet_data(data_dir, anno_file, pnet_model_file, rnet_model_file,
                  prefix_path='', use_cuda=True, vis=False):
    """Run the PNet+RNet stages over a dataset to build ONet training data.

    Detections are pickled under MODEL_STORE_DIR and then turned into
    positive / negative / part-face crops by gen_onet_sample_data.

    Parameters:
    ----------
    data_dir: str
        root folder where the 48x48 sample images will be written
    anno_file: str
        annotation file with ground-truth boxes
    pnet_model_file, rnet_model_file: str
        trained PNet / RNet checkpoint paths
    prefix_path: str
        prefix prepended to relative image paths
    use_cuda: bool
        run the networks on GPU when True
    vis: bool
        show before/after-calibration detections for every image
    """
    pnet, rnet, _ = create_mtcnn_net(p_model_path=pnet_model_file,
                                     r_model_path=rnet_model_file,
                                     use_cuda=use_cuda)
    mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, min_face_size=12)

    imagedb = ImageDB(anno_file, mode="test", prefix_path=prefix_path)
    imdb = imagedb.load_imdb()
    image_reader = TestImageLoader(imdb, 1, False)

    all_boxes = list()
    batch_idx = 0

    for databatch in image_reader:
        if batch_idx % 100 == 0:
            print("%d images done" % batch_idx)
        im = databatch

        # first two cascade stages: proposals, then refinement
        p_boxes, p_boxes_align = mtcnn_detector.detect_pnet(im=im)
        boxes, boxes_align = mtcnn_detector.detect_rnet(im=im, dets=p_boxes_align)

        if boxes_align is None:
            # keep the list aligned with the image index even when
            # nothing is found
            all_boxes.append(np.array([]))
            batch_idx += 1
            continue

        if vis:
            rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB)
            vision.vis_two(rgb_im, boxes, boxes_align)

        all_boxes.append(boxes_align)
        batch_idx += 1

    save_path = config.MODEL_STORE_DIR
    if not os.path.exists(save_path):
        # makedirs also creates missing parents (os.mkdir would fail)
        os.makedirs(save_path)

    save_file = os.path.join(save_path, "detections_%d.pkl" % int(time.time()))
    with open(save_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    gen_onet_sample_data(data_dir, anno_file, save_file)
open(os.path.join(save_path, 'part_%d.txt' % image_size), 'w') + + det_handle = open(det_boxs_file, 'r') + + det_boxes = cPickle.load(det_handle) + print len(det_boxes), num_of_images + assert len(det_boxes) == num_of_images, "incorrect detections or ground truths" + + # index of neg, pos and part face, used as their image names + n_idx = 0 + p_idx = 0 + d_idx = 0 + image_done = 0 + for im_idx, dets, gts in zip(im_idx_list, det_boxes, gt_boxes_list): + if image_done % 100 == 0: + print "%d images done" % image_done + image_done += 1 + + if dets.shape[0] == 0: + continue + img = cv2.imread(im_idx) + dets = convert_to_square(dets) + dets[:, 0:4] = np.round(dets[:, 0:4]) + + for box in dets: + x_left, y_top, x_right, y_bottom = box[0:4].astype(int) + width = x_right - x_left + 1 + height = y_bottom - y_top + 1 + + # ignore box that is too small or beyond image border + if width < 20 or x_left < 0 or y_top < 0 or x_right > img.shape[1] - 1 or y_bottom > img.shape[0] - 1: + continue + + # compute intersection over union(IoU) between current box and all gt boxes + Iou = IoU(box, gts) + cropped_im = img[y_top:y_bottom + 1, x_left:x_right + 1, :] + resized_im = cv2.resize(cropped_im, (image_size, image_size), + interpolation=cv2.INTER_LINEAR) + + # save negative images and write label + if np.max(Iou) < 0.3: + # Iou with all gts must below 0.3 + save_file = os.path.join(neg_save_dir, "%s.jpg" % n_idx) + f2.write(save_file + ' 0\n') + cv2.imwrite(save_file, resized_im) + n_idx += 1 + else: + # find gt_box with the highest iou + idx = np.argmax(Iou) + assigned_gt = gts[idx] + x1, y1, x2, y2 = assigned_gt + + # compute bbox reg label + offset_x1 = (x1 - x_left) / float(width) + offset_y1 = (y1 - y_top) / float(height) + offset_x2 = (x2 - x_right) / float(width) + offset_y2 = (y2 - y_bottom) / float(height) + + # save positive and part-face images and write labels + if np.max(Iou) >= 0.65: + save_file = os.path.join(pos_save_dir, "%s.jpg" % p_idx) + f1.write(save_file + ' 1 
%.2f %.2f %.2f %.2f\n' % ( + offset_x1, offset_y1, offset_x2, offset_y2)) + cv2.imwrite(save_file, resized_im) + p_idx += 1 + + elif np.max(Iou) >= 0.4: + save_file = os.path.join(part_save_dir, "%s.jpg" % d_idx) + f3.write(save_file + ' -1 %.2f %.2f %.2f %.2f\n' % ( + offset_x1, offset_y1, offset_x2, offset_y2)) + cv2.imwrite(save_file, resized_im) + d_idx += 1 + f1.close() + f2.close() + f3.close() + + + +def model_store_path(): + return os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))+"/model_store" + + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test mtcnn', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument('--dataset_path', dest='dataset_path', help='dataset folder', + default='../data/wider/', type=str) + parser.add_argument('--anno_file', dest='annotation_file', help='output data folder', + default='../data/wider/anno.txt', type=str) + parser.add_argument('--pmodel_file', dest='pnet_model_file', help='PNet model file path', + default='/idata/workspace/mtcnn/model_store/pnet_epoch_5best.pt', type=str) + parser.add_argument('--rmodel_file', dest='rnet_model_file', help='RNet model file path', + default='/idata/workspace/mtcnn/model_store/rnet_epoch_1.pt', type=str) + parser.add_argument('--gpu', dest='use_cuda', help='with gpu', + default=config.USE_CUDA, type=bool) + parser.add_argument('--prefix_path', dest='prefix_path', help='image prefix root path', + default='', type=str) + + args = parser.parse_args() + return args + + + +if __name__ == '__main__': + args = parse_args() + gen_onet_data(args.dataset_path, args.annotation_file, args.pnet_model_file, args.rnet_model_file, args.prefix_path, args.use_cuda) + + + diff --git a/src/prepare_data/gen_Pnet_train_data.py b/src/prepare_data/gen_Pnet_train_data.py new file mode 100644 index 0000000..be37699 --- /dev/null +++ b/src/prepare_data/gen_Pnet_train_data.py @@ -0,0 +1,174 @@ +import argparse +import numpy as np +import cv2 
+import os +import numpy.random as npr +from core.utils import IoU +import config + + + +def gen_pnet_data(data_dir,anno_file): + + neg_save_dir = os.path.join(data_dir,"12/negative") + pos_save_dir = os.path.join(data_dir,"12/positive") + part_save_dir = os.path.join(data_dir,"12/part") + + for dir_path in [neg_save_dir,pos_save_dir,part_save_dir]: + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + save_dir = os.path.join(data_dir,"pnet") + if not os.path.exists(save_dir): + os.mkdir(save_dir) + + post_save_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_POSTIVE_ANNO_FILENAME) + neg_save_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_NEGATIVE_ANNO_FILENAME) + part_save_file = os.path.join(config.ANNO_STORE_DIR,config.PNET_PART_ANNO_FILENAME) + + f1 = open(post_save_file, 'w') + f2 = open(neg_save_file, 'w') + f3 = open(part_save_file, 'w') + + with open(anno_file, 'r') as f: + annotations = f.readlines() + + num = len(annotations) + print "%d pics in total" % num + p_idx = 0 + n_idx = 0 + d_idx = 0 + idx = 0 + box_idx = 0 + for annotation in annotations: + annotation = annotation.strip().split(' ') + im_path = annotation[0] + bbox = map(float, annotation[1:]) + boxes = np.array(bbox, dtype=np.int32).reshape(-1, 4) + img = cv2.imread(im_path) + idx += 1 + if idx % 100 == 0: + print idx, "images done" + + height, width, channel = img.shape + + neg_num = 0 + while neg_num < 50: + size = npr.randint(12, min(width, height) / 2) + nx = npr.randint(0, width - size) + ny = npr.randint(0, height - size) + crop_box = np.array([nx, ny, nx + size, ny + size]) + + Iou = IoU(crop_box, boxes) + + cropped_im = img[ny : ny + size, nx : nx + size, :] + resized_im = cv2.resize(cropped_im, (12, 12), interpolation=cv2.INTER_LINEAR) + + if np.max(Iou) < 0.3: + # Iou with all gts must below 0.3 + save_file = os.path.join(neg_save_dir, "%s.jpg"%n_idx) + f2.write(save_file + ' 0\n') + cv2.imwrite(save_file, resized_im) + n_idx += 1 + neg_num += 1 + + + for box in 
boxes: + # box (x_left, y_top, x_right, y_bottom) + x1, y1, x2, y2 = box + w = x2 - x1 + 1 + h = y2 - y1 + 1 + + # ignore small faces + # in case the ground truth boxes of small faces are not accurate + if max(w, h) < 40 or x1 < 0 or y1 < 0: + continue + + # generate negative examples that have overlap with gt + for i in range(5): + size = npr.randint(12, min(width, height) / 2) + # delta_x and delta_y are offsets of (x1, y1) + delta_x = npr.randint(max(-size, -x1), w) + delta_y = npr.randint(max(-size, -y1), h) + nx1 = max(0, x1 + delta_x) + ny1 = max(0, y1 + delta_y) + + + + if nx1 + size > width or ny1 + size > height: + continue + crop_box = np.array([nx1, ny1, nx1 + size, ny1 + size]) + Iou = IoU(crop_box, boxes) + + cropped_im = img[ny1 : ny1 + size, nx1 : nx1 + size, :] + resized_im = cv2.resize(cropped_im, (12, 12), interpolation=cv2.INTER_LINEAR) + + if np.max(Iou) < 0.3: + # Iou with all gts must below 0.3 + save_file = os.path.join(neg_save_dir, "%s.jpg"%n_idx) + f2.write(save_file + ' 0\n') + cv2.imwrite(save_file, resized_im) + n_idx += 1 + + # generate positive examples and part faces + for i in range(20): + size = npr.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h))) + + # delta here is the offset of box center + delta_x = npr.randint(-w * 0.2, w * 0.2) + delta_y = npr.randint(-h * 0.2, h * 0.2) + + nx1 = max(x1 + w / 2 + delta_x - size / 2, 0) + ny1 = max(y1 + h / 2 + delta_y - size / 2, 0) + nx2 = nx1 + size + ny2 = ny1 + size + + if nx2 > width or ny2 > height: + continue + crop_box = np.array([nx1, ny1, nx2, ny2]) + + offset_x1 = (x1 - nx1) / float(size) + offset_y1 = (y1 - ny1) / float(size) + offset_x2 = (x2 - nx2) / float(size) + offset_y2 = (y2 - ny2) / float(size) + + cropped_im = img[ny1 : ny2, nx1 : nx2, :] + resized_im = cv2.resize(cropped_im, (12, 12), interpolation=cv2.INTER_LINEAR) + + box_ = box.reshape(1, -1) + if IoU(crop_box, box_) >= 0.65: + save_file = os.path.join(pos_save_dir, "%s.jpg"%p_idx) + f1.write(save_file + ' 1 
%.2f %.2f %.2f %.2f\n'%(offset_x1, offset_y1, offset_x2, offset_y2)) + cv2.imwrite(save_file, resized_im) + p_idx += 1 + elif IoU(crop_box, box_) >= 0.4: + save_file = os.path.join(part_save_dir, "%s.jpg"%d_idx) + f3.write(save_file + ' -1 %.2f %.2f %.2f %.2f\n'%(offset_x1, offset_y1, offset_x2, offset_y2)) + cv2.imwrite(save_file, resized_im) + d_idx += 1 + box_idx += 1 + print "%s images done, pos: %s part: %s neg: %s"%(idx, p_idx, d_idx, n_idx) + + f1.close() + f2.close() + f3.close() + + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test mtcnn', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument('--dataset_path', dest='dataset_path', help='dataset folder', + default='../data/wider/', type=str) + parser.add_argument('--anno_file', dest='annotation_file', help='dataset original annotation file', + default='../data/wider/anno.txt', type=str) + parser.add_argument('--prefix_path', dest='prefix_path', help='image prefix root path', + default='', type=str) + + args = parser.parse_args() + return args + +if __name__ == '__main__': + args = parse_args() + gen_pnet_data(args.dataset_path,args.annotation_file) \ No newline at end of file diff --git a/src/prepare_data/gen_Rnet_train_data.py b/src/prepare_data/gen_Rnet_train_data.py new file mode 100644 index 0000000..b9d922c --- /dev/null +++ b/src/prepare_data/gen_Rnet_train_data.py @@ -0,0 +1,219 @@ +import argparse + + +import cv2 +import numpy as np +from core.detect import MtcnnDetector,create_mtcnn_net +from core.imagedb import ImageDB +from core.image_reader import TestImageLoader +import time +import os +import cPickle +from core.utils import convert_to_square,IoU +import config +import core.vision as vision + +def gen_rnet_data(data_dir, anno_file, pnet_model_file, prefix_path='', use_cuda=True, vis=False): + + + pnet, _, _ = create_mtcnn_net(p_model_path=pnet_model_file, use_cuda=use_cuda) + mtcnn_detector = MtcnnDetector(pnet=pnet,min_face_size=12) + + 
imagedb = ImageDB(anno_file,mode="test",prefix_path=prefix_path) + imdb = imagedb.load_imdb() + image_reader = TestImageLoader(imdb,1,False) + + all_boxes = list() + batch_idx = 0 + + for databatch in image_reader: + if batch_idx % 100 == 0: + print "%d images done" % batch_idx + im = databatch + + t = time.time() + + boxes, boxes_align = mtcnn_detector.detect_pnet(im=im) + if boxes_align is None: + all_boxes.append(np.array([])) + batch_idx += 1 + continue + if vis: + rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB) + vision.vis_two(rgb_im, boxes, boxes_align) + + t1 = time.time() - t + t = time.time() + all_boxes.append(boxes_align) + batch_idx += 1 + + # save_path = model_store_path() + save_path = config.MODEL_STORE_DIR + + if not os.path.exists(save_path): + os.mkdir(save_path) + + save_file = os.path.join(save_path, "detections_%d.pkl" % int(time.time())) + with open(save_file, 'wb') as f: + cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL) + + + gen_rnet_sample_data(data_dir,anno_file,save_file) + + + +def gen_rnet_sample_data(data_dir,anno_file,det_boxs_file): + + neg_save_dir = os.path.join(data_dir, "24/negative") + pos_save_dir = os.path.join(data_dir, "24/positive") + part_save_dir = os.path.join(data_dir, "24/part") + + for dir_path in [neg_save_dir, pos_save_dir, part_save_dir]: + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + + # load ground truth from annotation file + # format of each line: image/path [x1,y1,x2,y2] for each gt_box in this image + + with open(anno_file, 'r') as f: + annotations = f.readlines() + + image_size = 24 + net = "rnet" + + im_idx_list = list() + gt_boxes_list = list() + num_of_images = len(annotations) + print "processing %d images in total" % num_of_images + + for annotation in annotations: + annotation = annotation.strip().split(' ') + im_idx = annotation[0] + + boxes = map(float, annotation[1:]) + boxes = np.array(boxes, dtype=np.float32).reshape(-1, 4) + im_idx_list.append(im_idx) + 
gt_boxes_list.append(boxes) + + + save_path = config.ANNO_STORE_DIR + if not os.path.exists(save_path): + os.makedirs(save_path) + + f1 = open(os.path.join(save_path, 'pos_%d.txt' % image_size), 'w') + f2 = open(os.path.join(save_path, 'neg_%d.txt' % image_size), 'w') + f3 = open(os.path.join(save_path, 'part_%d.txt' % image_size), 'w') + + det_handle = open(det_boxs_file, 'r') + + det_boxes = cPickle.load(det_handle) + print len(det_boxes), num_of_images + assert len(det_boxes) == num_of_images, "incorrect detections or ground truths" + + # index of neg, pos and part face, used as their image names + n_idx = 0 + p_idx = 0 + d_idx = 0 + image_done = 0 + for im_idx, dets, gts in zip(im_idx_list, det_boxes, gt_boxes_list): + if image_done % 100 == 0: + print "%d images done" % image_done + image_done += 1 + + if dets.shape[0] == 0: + continue + img = cv2.imread(im_idx) + dets = convert_to_square(dets) + dets[:, 0:4] = np.round(dets[:, 0:4]) + + for box in dets: + x_left, y_top, x_right, y_bottom = box[0:4].astype(int) + width = x_right - x_left + 1 + height = y_bottom - y_top + 1 + + # ignore box that is too small or beyond image border + if width < 20 or x_left < 0 or y_top < 0 or x_right > img.shape[1] - 1 or y_bottom > img.shape[0] - 1: + continue + + # compute intersection over union(IoU) between current box and all gt boxes + Iou = IoU(box, gts) + cropped_im = img[y_top:y_bottom + 1, x_left:x_right + 1, :] + resized_im = cv2.resize(cropped_im, (image_size, image_size), + interpolation=cv2.INTER_LINEAR) + + # save negative images and write label + if np.max(Iou) < 0.3: + # Iou with all gts must below 0.3 + save_file = os.path.join(neg_save_dir, "%s.jpg" % n_idx) + f2.write(save_file + ' 0\n') + cv2.imwrite(save_file, resized_im) + n_idx += 1 + else: + # find gt_box with the highest iou + idx = np.argmax(Iou) + assigned_gt = gts[idx] + x1, y1, x2, y2 = assigned_gt + + # compute bbox reg label + offset_x1 = (x1 - x_left) / float(width) + offset_y1 = (y1 - y_top) / 
float(height) + offset_x2 = (x2 - x_right) / float(width) + offset_y2 = (y2 - y_bottom) / float(height) + + # save positive and part-face images and write labels + if np.max(Iou) >= 0.65: + save_file = os.path.join(pos_save_dir, "%s.jpg" % p_idx) + f1.write(save_file + ' 1 %.2f %.2f %.2f %.2f\n' % ( + offset_x1, offset_y1, offset_x2, offset_y2)) + cv2.imwrite(save_file, resized_im) + p_idx += 1 + + elif np.max(Iou) >= 0.4: + save_file = os.path.join(part_save_dir, "%s.jpg" % d_idx) + f3.write(save_file + ' -1 %.2f %.2f %.2f %.2f\n' % ( + offset_x1, offset_y1, offset_x2, offset_y2)) + cv2.imwrite(save_file, resized_im) + d_idx += 1 + f1.close() + f2.close() + f3.close() + + + + + + + +def model_store_path(): + return os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))+"/model_store" + + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test mtcnn', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument('--dataset_path', dest='dataset_path', help='dataset folder', + default='../data/wider/', type=str) + parser.add_argument('--anno_file', dest='annotation_file', help='dataset original annotation file', + default='../data/wider/anno.txt', type=str) + parser.add_argument('--pmodel_file', dest='pnet_model_file', help='PNet model file path', + default='/idata/workspace/mtcnn/model_store/pnet_epoch_5best.pt', type=str) + parser.add_argument('--gpu', dest='use_cuda', help='with gpu', + default=config.USE_CUDA, type=bool) + parser.add_argument('--prefix_path', dest='prefix_path', help='image prefix root path', + default='', type=str) + + + args = parser.parse_args() + return args + + + +if __name__ == '__main__': + args = parse_args() + gen_rnet_data(args.dataset_path, args.annotation_file, args.pnet_model_file, args.prefix_path, args.use_cuda) + + + diff --git a/src/prepare_data/gen_landmark_12.py b/src/prepare_data/gen_landmark_12.py new file mode 100644 index 0000000..cb84a05 --- /dev/null +++ 
b/src/prepare_data/gen_landmark_12.py @@ -0,0 +1,156 @@ +# coding: utf-8 +import os +import cv2 +import numpy as np +import sys +import numpy.random as npr +import argparse +import config +import core.utils as utils + + +def gen_data(anno_file, data_dir, prefix): + + + size = 12 + image_id = 0 + + landmark_imgs_save_dir = os.path.join(data_dir,"12/landmark") + if not os.path.exists(landmark_imgs_save_dir): + os.makedirs(landmark_imgs_save_dir) + + anno_dir = config.ANNO_STORE_DIR + if not os.path.exists(anno_dir): + os.makedirs(anno_dir) + + landmark_anno_filename = config.PNET_LANDMARK_ANNO_FILENAME + save_landmark_anno = os.path.join(anno_dir,landmark_anno_filename) + + f = open(save_landmark_anno, 'w') + # dstdir = "train_landmark_few" + + + with open(anno_file, 'r') as f2: + annotations = f2.readlines() + + num = len(annotations) + print "%d pics in total" % num + + l_idx =0 + idx = 0 + # image_path bbox landmark(5*2) + for annotation in annotations: + # print imgPath + + annotation = annotation.strip().split(' ') + + assert len(annotation)==15,"each line should have 15 element" + + im_path = os.path.join(prefix,annotation[0].replace("\\", "/")) + + gt_box = map(float, annotation[1:5]) + gt_box = [gt_box[0], gt_box[2], gt_box[1], gt_box[3]] + + + gt_box = np.array(gt_box, dtype=np.int32) + + + + landmark = bbox = map(float, annotation[5:]) + landmark = np.array(landmark, dtype=np.float) + + img = cv2.imread(im_path) + assert (img is not None) + + height, width, channel = img.shape + # crop_face = img[gt_box[1]:gt_box[3]+1, gt_box[0]:gt_box[2]+1] + # crop_face = cv2.resize(crop_face,(size,size)) + + idx = idx + 1 + if idx % 100 == 0: + print "%d images done, landmark images: %d"%(idx,l_idx) + + x1, y1, x2, y2 = gt_box + + # gt's width + w = x2 - x1 + 1 + # gt's height + h = y2 - y1 + 1 + if max(w, h) < 40 or x1 < 0 or y1 < 0: + continue + # random shift + for i in range(10): + bbox_size = npr.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h))) + delta_x = 
npr.randint(-w * 0.2, w * 0.2) + delta_y = npr.randint(-h * 0.2, h * 0.2) + nx1 = max(x1 + w / 2 - bbox_size / 2 + delta_x, 0) + ny1 = max(y1 + h / 2 - bbox_size / 2 + delta_y, 0) + + nx2 = nx1 + bbox_size + ny2 = ny1 + bbox_size + if nx2 > width or ny2 > height: + continue + crop_box = np.array([nx1, ny1, nx2, ny2]) + cropped_im = img[ny1:ny2 + 1, nx1:nx2 + 1, :] + resized_im = cv2.resize(cropped_im, (size, size),interpolation=cv2.INTER_LINEAR) + + offset_x1 = (x1 - nx1) / float(bbox_size) + offset_y1 = (y1 - ny1) / float(bbox_size) + offset_x2 = (x2 - nx2) / float(bbox_size) + offset_y2 = (y2 - ny2) / float(bbox_size) + + offset_left_eye_x = (landmark[0] - nx1) / float(bbox_size) + offset_left_eye_y = (landmark[1] - ny1) / float(bbox_size) + + offset_right_eye_x = (landmark[2] - nx1) / float(bbox_size) + offset_right_eye_y = (landmark[3] - ny1) / float(bbox_size) + + offset_nose_x = (landmark[4] - nx1) / float(bbox_size) + offset_nose_y = (landmark[5] - ny1) / float(bbox_size) + + offset_left_mouth_x = (landmark[6] - nx1) / float(bbox_size) + offset_left_mouth_y = (landmark[7] - ny1) / float(bbox_size) + + offset_right_mouth_x = (landmark[8] - nx1) / float(bbox_size) + offset_right_mouth_y = (landmark[9] - ny1) / float(bbox_size) + + + # cal iou + iou = utils.IoU(crop_box.astype(np.float), np.expand_dims(gt_box.astype(np.float), 0)) + if iou > 0.65: + save_file = os.path.join(landmark_imgs_save_dir, "%s.jpg" % l_idx) + cv2.imwrite(save_file, resized_im) + + f.write(save_file + ' -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f \n' % \ + (offset_x1, offset_y1, offset_x2, offset_y2, \ + offset_left_eye_x,offset_left_eye_y,offset_right_eye_x,offset_right_eye_y,offset_nose_x,offset_nose_y,offset_left_mouth_x,offset_left_mouth_y,offset_right_mouth_x,offset_right_mouth_y)) + + l_idx += 1 + + + f.close() + + + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test mtcnn', + 
formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument('--dataset_path', dest='dataset_path', help='dataset folder', + default='../data/wider/', type=str) + parser.add_argument('--anno_file', dest='annotation_file', help='dataset original annotation file', + default='../data/wider/anno.txt', type=str) + parser.add_argument('--prefix_path', dest='prefix_path', help='image prefix root path', + default='../data/', type=str) + + + args = parser.parse_args() + return args + +if __name__ == '__main__': + args = parse_args() + + gen_data(args.annotation_file, args.dataset_path, args.prefix_path) + + diff --git a/src/prepare_data/gen_landmark_24.py b/src/prepare_data/gen_landmark_24.py new file mode 100644 index 0000000..10a63e0 --- /dev/null +++ b/src/prepare_data/gen_landmark_24.py @@ -0,0 +1,154 @@ +# coding: utf-8 +import os +import cv2 +import numpy as np +import random +import sys +import numpy.random as npr +import argparse +import config +import core.utils as utils + + + +def gen_data(anno_file, data_dir, prefix): + + + size = 24 + image_id = 0 + + landmark_imgs_save_dir = os.path.join(data_dir,"24/landmark") + if not os.path.exists(landmark_imgs_save_dir): + os.makedirs(landmark_imgs_save_dir) + + anno_dir = config.ANNO_STORE_DIR + if not os.path.exists(anno_dir): + os.makedirs(anno_dir) + + landmark_anno_filename = config.RNET_LANDMARK_ANNO_FILENAME + save_landmark_anno = os.path.join(anno_dir,landmark_anno_filename) + + f = open(save_landmark_anno, 'w') + # dstdir = "train_landmark_few" + + with open(anno_file, 'r') as f2: + annotations = f2.readlines() + + num = len(annotations) + print "%d total images" % num + + l_idx =0 + idx = 0 + # image_path bbox landmark(5*2) + for annotation in annotations: + # print imgPath + + annotation = annotation.strip().split(' ') + + assert len(annotation)==15,"each line should have 15 element" + + im_path = os.path.join(prefix,annotation[0].replace("\\", "/")) + + gt_box = map(float, annotation[1:5]) + 
gt_box = [gt_box[0], gt_box[2], gt_box[1], gt_box[3]] + + + gt_box = np.array(gt_box, dtype=np.int32) + + landmark = map(float, annotation[5:]) + landmark = np.array(landmark, dtype=np.float) + + img = cv2.imread(im_path) + assert (img is not None) + + height, width, channel = img.shape + # crop_face = img[gt_box[1]:gt_box[3]+1, gt_box[0]:gt_box[2]+1] + # crop_face = cv2.resize(crop_face,(size,size)) + + idx = idx + 1 + if idx % 100 == 0: + print "%d images done, landmark images: %d"%(idx,l_idx) + + x1, y1, x2, y2 = gt_box + + # gt's width + w = x2 - x1 + 1 + # gt's height + h = y2 - y1 + 1 + if max(w, h) < 40 or x1 < 0 or y1 < 0: + continue + # random shift + for i in range(10): + bbox_size = npr.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h))) + delta_x = npr.randint(-w * 0.2, w * 0.2) + delta_y = npr.randint(-h * 0.2, h * 0.2) + nx1 = max(x1 + w / 2 - bbox_size / 2 + delta_x, 0) + ny1 = max(y1 + h / 2 - bbox_size / 2 + delta_y, 0) + + nx2 = nx1 + bbox_size + ny2 = ny1 + bbox_size + if nx2 > width or ny2 > height: + continue + crop_box = np.array([nx1, ny1, nx2, ny2]) + cropped_im = img[ny1:ny2 + 1, nx1:nx2 + 1, :] + resized_im = cv2.resize(cropped_im, (size, size),interpolation=cv2.INTER_LINEAR) + + offset_x1 = (x1 - nx1) / float(bbox_size) + offset_y1 = (y1 - ny1) / float(bbox_size) + offset_x2 = (x2 - nx2) / float(bbox_size) + offset_y2 = (y2 - ny2) / float(bbox_size) + + offset_left_eye_x = (landmark[0] - nx1) / float(bbox_size) + offset_left_eye_y = (landmark[1] - ny1) / float(bbox_size) + + offset_right_eye_x = (landmark[2] - nx1) / float(bbox_size) + offset_right_eye_y = (landmark[3] - ny1) / float(bbox_size) + + offset_nose_x = (landmark[4] - nx1) / float(bbox_size) + offset_nose_y = (landmark[5] - ny1) / float(bbox_size) + + offset_left_mouth_x = (landmark[6] - nx1) / float(bbox_size) + offset_left_mouth_y = (landmark[7] - ny1) / float(bbox_size) + + offset_right_mouth_x = (landmark[8] - nx1) / float(bbox_size) + offset_right_mouth_y = 
(landmark[9] - ny1) / float(bbox_size) + + + # cal iou + iou = utils.IoU(crop_box.astype(np.float), np.expand_dims(gt_box.astype(np.float), 0)) + if iou > 0.65: + save_file = os.path.join(landmark_imgs_save_dir, "%s.jpg" % l_idx) + cv2.imwrite(save_file, resized_im) + + f.write(save_file + ' -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f \n' % \ + (offset_x1, offset_y1, offset_x2, offset_y2, \ + offset_left_eye_x,offset_left_eye_y,offset_right_eye_x,offset_right_eye_y,offset_nose_x,offset_nose_y,offset_left_mouth_x,offset_left_mouth_y,offset_right_mouth_x,offset_right_mouth_y)) + + l_idx += 1 + + f.close() + + + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test mtcnn', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument('--dataset_path', dest='dataset_path', help='dataset folder', + default='/idata/data/wider/', type=str) + parser.add_argument('--anno_file', dest='annotation_file', help='dataset original annotation file', + default='/idata/data/trainImageList.txt', type=str) + parser.add_argument('--prefix_path', dest='prefix_path', help='image prefix root path', + default='/idata/data', type=str) + + + args = parser.parse_args() + return args + +if __name__ == '__main__': + args = parse_args() + + gen_data(args.annotation_file, args.dataset_path, args.prefix_path) + + diff --git a/src/prepare_data/gen_landmark_48.py b/src/prepare_data/gen_landmark_48.py new file mode 100644 index 0000000..77fc980 --- /dev/null +++ b/src/prepare_data/gen_landmark_48.py @@ -0,0 +1,153 @@ +# coding: utf-8 +import os +import cv2 +import numpy as np +import random +import sys +import numpy.random as npr +import argparse +import config +import core.utils as utils + + +def gen_data(anno_file, data_dir, prefix): + + + size = 48 + image_id = 0 + + landmark_imgs_save_dir = os.path.join(data_dir,"48/landmark") + if not os.path.exists(landmark_imgs_save_dir): + os.makedirs(landmark_imgs_save_dir) + + anno_dir = 
config.ANNO_STORE_DIR + if not os.path.exists(anno_dir): + os.makedirs(anno_dir) + + landmark_anno_filename = config.ONET_LANDMARK_ANNO_FILENAME + save_landmark_anno = os.path.join(anno_dir,landmark_anno_filename) + + f = open(save_landmark_anno, 'w') + # dstdir = "train_landmark_few" + + with open(anno_file, 'r') as f2: + annotations = f2.readlines() + + num = len(annotations) + print "%d total images" % num + + l_idx =0 + idx = 0 + # image_path bbox landmark(5*2) + for annotation in annotations: + # print imgPath + + annotation = annotation.strip().split(' ') + + assert len(annotation)==15,"each line should have 15 element" + + im_path = os.path.join(prefix,annotation[0].replace("\\", "/")) + + gt_box = map(float, annotation[1:5]) + # gt_box = [gt_box[0], gt_box[2], gt_box[1], gt_box[3]] + + + gt_box = np.array(gt_box, dtype=np.int32) + + landmark = map(float, annotation[5:]) + landmark = np.array(landmark, dtype=np.float) + + img = cv2.imread(im_path) + assert (img is not None) + + height, width, channel = img.shape + # crop_face = img[gt_box[1]:gt_box[3]+1, gt_box[0]:gt_box[2]+1] + # crop_face = cv2.resize(crop_face,(size,size)) + + idx = idx + 1 + if idx % 100 == 0: + print "%d images done, landmark images: %d"%(idx,l_idx) + + x1, y1, x2, y2 = gt_box + + # gt's width + w = x2 - x1 + 1 + # gt's height + h = y2 - y1 + 1 + if max(w, h) < 40 or x1 < 0 or y1 < 0: + continue + # random shift + for i in range(10): + bbox_size = npr.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h))) + delta_x = npr.randint(-w * 0.2, w * 0.2) + delta_y = npr.randint(-h * 0.2, h * 0.2) + nx1 = max(x1 + w / 2 - bbox_size / 2 + delta_x, 0) + ny1 = max(y1 + h / 2 - bbox_size / 2 + delta_y, 0) + + nx2 = nx1 + bbox_size + ny2 = ny1 + bbox_size + if nx2 > width or ny2 > height: + continue + crop_box = np.array([nx1, ny1, nx2, ny2]) + cropped_im = img[ny1:ny2 + 1, nx1:nx2 + 1, :] + resized_im = cv2.resize(cropped_im, (size, size),interpolation=cv2.INTER_LINEAR) + + offset_x1 = (x1 - nx1) 
/ float(bbox_size) + offset_y1 = (y1 - ny1) / float(bbox_size) + offset_x2 = (x2 - nx2) / float(bbox_size) + offset_y2 = (y2 - ny2) / float(bbox_size) + + offset_left_eye_x = (landmark[0] - nx1) / float(bbox_size) + offset_left_eye_y = (landmark[1] - ny1) / float(bbox_size) + + offset_right_eye_x = (landmark[2] - nx1) / float(bbox_size) + offset_right_eye_y = (landmark[3] - ny1) / float(bbox_size) + + offset_nose_x = (landmark[4] - nx1) / float(bbox_size) + offset_nose_y = (landmark[5] - ny1) / float(bbox_size) + + offset_left_mouth_x = (landmark[6] - nx1) / float(bbox_size) + offset_left_mouth_y = (landmark[7] - ny1) / float(bbox_size) + + offset_right_mouth_x = (landmark[8] - nx1) / float(bbox_size) + offset_right_mouth_y = (landmark[9] - ny1) / float(bbox_size) + + + # cal iou + iou = utils.IoU(crop_box.astype(np.float), np.expand_dims(gt_box.astype(np.float), 0)) + if iou > 0.65: + save_file = os.path.join(landmark_imgs_save_dir, "%s.jpg" % l_idx) + cv2.imwrite(save_file, resized_im) + + f.write(save_file + ' -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f \n' % \ + (offset_x1, offset_y1, offset_x2, offset_y2, \ + offset_left_eye_x,offset_left_eye_y,offset_right_eye_x,offset_right_eye_y,offset_nose_x,offset_nose_y,offset_left_mouth_x,offset_left_mouth_y,offset_right_mouth_x,offset_right_mouth_y)) + + l_idx += 1 + + f.close() + + + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test mtcnn', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument('--dataset_path', dest='dataset_path', help='dataset folder', + default='/idata/data/wider/', type=str) + parser.add_argument('--anno_file', dest='annotation_file', help='dataset original annotation file', + default='/idata/data/trainImageList.txt', type=str) + parser.add_argument('--prefix_path', dest='prefix_path', help='image prefix root path', + default='/idata/data', type=str) + + + args = parser.parse_args() + return args + +if __name__ == 
'__main__': + args = parse_args() + + gen_data(args.annotation_file, args.dataset_path, args.prefix_path) + + diff --git a/src/prepare_data/gen_landmark_net_48.py b/src/prepare_data/gen_landmark_net_48.py new file mode 100644 index 0000000..0003596 --- /dev/null +++ b/src/prepare_data/gen_landmark_net_48.py @@ -0,0 +1,234 @@ +import argparse + +import cv2 +import numpy as np +from core.detect import MtcnnDetector,create_mtcnn_net +from core.imagedb import ImageDB +from core.image_reader import TestImageLoader +import time +import os +import cPickle +from core.utils import convert_to_square,IoU +import config +import core.vision as vision + +def gen_landmark48_data(data_dir, anno_file, pnet_model_file, rnet_model_file, prefix_path='', use_cuda=True, vis=False): + + + pnet, rnet, _ = create_mtcnn_net(p_model_path=pnet_model_file, r_model_path=rnet_model_file, use_cuda=use_cuda) + mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, min_face_size=12) + + imagedb = ImageDB(anno_file,mode="test",prefix_path=prefix_path) + imdb = imagedb.load_imdb() + image_reader = TestImageLoader(imdb,1,False) + + all_boxes = list() + batch_idx = 0 + + for databatch in image_reader: + if batch_idx % 100 == 0: + print "%d images done" % batch_idx + im = databatch + + + if im.shape[0] >= 1200 or im.shape[1] >=1200: + all_boxes.append(np.array([])) + batch_idx += 1 + continue + + + t = time.time() + + p_boxes, p_boxes_align = mtcnn_detector.detect_pnet(im=im) + + boxes, boxes_align = mtcnn_detector.detect_rnet(im=im, dets=p_boxes_align) + + if boxes_align is None: + all_boxes.append(np.array([])) + batch_idx += 1 + continue + if vis: + rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB) + vision.vis_two(rgb_im, boxes, boxes_align) + + t1 = time.time() - t + t = time.time() + all_boxes.append(boxes_align) + batch_idx += 1 + + save_path = config.MODEL_STORE_DIR + + if not os.path.exists(save_path): + os.mkdir(save_path) + + save_file = os.path.join(save_path, "detections_%d.pkl" % 
int(time.time())) + with open(save_file, 'wb') as f: + cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL) + + + gen_sample_data(data_dir,anno_file,save_file, prefix_path) + + + +def gen_sample_data(data_dir, anno_file, det_boxs_file, prefix_path =''): + + landmark_save_dir = os.path.join(data_dir, "48/landmark") + + if not os.path.exists(landmark_save_dir): + os.makedirs(landmark_save_dir) + + + # load ground truth from annotation file + # format of each line: image/path [x1,y1,x2,y2] for each gt_box in this image + + with open(anno_file, 'r') as f: + annotations = f.readlines() + + image_size = 48 + net = "onet" + + im_idx_list = list() + gt_boxes_list = list() + gt_landmark_list = list() + num_of_images = len(annotations) + print "processing %d images in total" % num_of_images + + for annotation in annotations: + annotation = annotation.strip().split(' ') + im_idx = annotation[0] + + boxes = map(float, annotation[1:5]) + boxes = np.array(boxes, dtype=np.float32).reshape(-1, 4) + landmarks = map(float, annotation[5:]) + landmarks = np.array(landmarks, dtype=np.float32).reshape(-1, 10) + + im_idx_list.append(im_idx) + gt_boxes_list.append(boxes) + gt_landmark_list.append(landmarks) + + + save_path = config.ANNO_STORE_DIR + if not os.path.exists(save_path): + os.makedirs(save_path) + + f = open(os.path.join(save_path, 'landmark_48.txt'), 'w') + + + det_handle = open(det_boxs_file, 'r') + + det_boxes = cPickle.load(det_handle) + print len(det_boxes), num_of_images + assert len(det_boxes) == num_of_images, "incorrect detections or ground truths" + + # index of neg, pos and part face, used as their image names + p_idx = 0 + image_done = 0 + for im_idx, dets, gts, landmark in zip(im_idx_list, det_boxes, gt_boxes_list, gt_landmark_list): + if image_done % 100 == 0: + print "%d images done" % image_done + image_done += 1 + + if dets.shape[0] == 0: + continue + img = cv2.imread(os.path.join(prefix_path,im_idx)) + dets = convert_to_square(dets) + dets[:, 0:4] = 
np.round(dets[:, 0:4]) + + for box in dets: + x_left, y_top, x_right, y_bottom = box[0:4].astype(int) + width = x_right - x_left + 1 + height = y_bottom - y_top + 1 + + # ignore box that is too small or beyond image border + if width < 20 or x_left < 0 or y_top < 0 or x_right > img.shape[1] - 1 or y_bottom > img.shape[0] - 1: + continue + + # compute intersection over union(IoU) between current box and all gt boxes + Iou = IoU(box, gts) + cropped_im = img[y_top:y_bottom + 1, x_left:x_right + 1, :] + resized_im = cv2.resize(cropped_im, (image_size, image_size), + interpolation=cv2.INTER_LINEAR) + + # save negative images and write label + if np.max(Iou) < 0.3: + # Iou with all gts must below 0.3 + continue + else: + # find gt_box with the highest iou + idx = np.argmax(Iou) + assigned_gt = gts[idx] + x1, y1, x2, y2 = assigned_gt + + # compute bbox reg label + offset_x1 = (x1 - x_left) / float(width) + offset_y1 = (y1 - y_top) / float(height) + offset_x2 = (x2 - x_right) / float(width) + offset_y2 = (y2 - y_bottom) / float(height) + + offset_left_eye_x = (landmark[0,0] - x_left) / float(width) + offset_left_eye_y = (landmark[0,1] - y_top) / float(height) + + offset_right_eye_x = (landmark[0,2] - x_left) / float(width) + offset_right_eye_y = (landmark[0,3] - y_top) / float(height) + + offset_nose_x = (landmark[0,4] - x_left) / float(width) + offset_nose_y = (landmark[0,5] - y_top) / float(height) + + offset_left_mouth_x = (landmark[0,6] - x_left) / float(width) + offset_left_mouth_y = (landmark[0,7] - y_top) / float(height) + + offset_right_mouth_x = (landmark[0,8] - x_left) / float(width) + offset_right_mouth_y = (landmark[0,9] - y_top) / float(height) + + + + # save positive and part-face images and write labels + if np.max(Iou) >= 0.65: + save_file = os.path.join(landmark_save_dir, "%s.jpg" % p_idx) + + f.write(save_file + ' -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f \n' % \ + (offset_x1, offset_y1, offset_x2, offset_y2, \ + 
offset_left_eye_x, offset_left_eye_y, offset_right_eye_x, offset_right_eye_y, + offset_nose_x, offset_nose_y, offset_left_mouth_x, offset_left_mouth_y, + offset_right_mouth_x, offset_right_mouth_y)) + + cv2.imwrite(save_file, resized_im) + p_idx += 1 + + f.close() + + + +def model_store_path(): + return os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))+"/model_store" + + + +def parse_args(): + parser = argparse.ArgumentParser(description='Test mtcnn', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument('--dataset_path', dest='dataset_path', help='dataset folder', + default='../data/wider/', type=str) + parser.add_argument('--anno_file', dest='annotation_file', help='output data folder', + default='../data/wider/anno.txt', type=str) + parser.add_argument('--pmodel_file', dest='pnet_model_file', help='PNet model file path', + default='/idata/workspace/mtcnn/model_store/pnet_epoch_5best.pt', type=str) + parser.add_argument('--rmodel_file', dest='rnet_model_file', help='RNet model file path', + default='/idata/workspace/mtcnn/model_store/rnet_epoch_1.pt', type=str) + parser.add_argument('--gpu', dest='use_cuda', help='with gpu', + default=config.USE_CUDA, type=bool) + parser.add_argument('--prefix_path', dest='prefix_path', help='image prefix root path', + default='', type=str) + + args = parser.parse_args() + return args + + + +if __name__ == '__main__': + args = parse_args() + gen_landmark48_data(args.dataset_path, args.annotation_file, args.pnet_model_file, args.rnet_model_file, args.prefix_path, args.use_cuda) + + + diff --git a/src/train_net/__init__.py b/src/train_net/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/train_net/train.py b/src/train_net/train.py new file mode 100644 index 0000000..d095c93 --- /dev/null +++ b/src/train_net/train.py @@ -0,0 +1,281 @@ +from core.image_reader import TrainImageReader +import datetime +import os +from core.models import PNet,RNet,ONet,LossFn 
+import torch +from torch.autograd import Variable +import core.image_tools as image_tools + + + + + +def compute_accuracy(prob_cls, gt_cls): + prob_cls = torch.squeeze(prob_cls) + gt_cls = torch.squeeze(gt_cls) + + #we only need the detection which >= 0 + mask = torch.ge(gt_cls,0) + #get valid element + valid_gt_cls = torch.masked_select(gt_cls,mask) + valid_prob_cls = torch.masked_select(prob_cls,mask) + size = min(valid_gt_cls.size()[0], valid_prob_cls.size()[0]) + prob_ones = torch.ge(valid_prob_cls,0.6).float() + right_ones = torch.eq(prob_ones,valid_gt_cls).float() + + return torch.div(torch.mul(torch.sum(right_ones),float(1.0)),float(size)) + + +def train_pnet(model_store_path, end_epoch,imdb, + batch_size,frequent=50,base_lr=0.01,use_cuda=True): + + if not os.path.exists(model_store_path): + os.makedirs(model_store_path) + + lossfn = LossFn() + net = PNet(is_train=True, use_cuda=use_cuda) + net.train() + if use_cuda: + net.cuda() + + optimizer = torch.optim.Adam(net.parameters(), lr=base_lr) + + train_data=TrainImageReader(imdb,12,batch_size,shuffle=True) + + + for cur_epoch in range(1,end_epoch+1): + train_data.reset() + accuracy_list=[] + cls_loss_list=[] + bbox_loss_list=[] + # landmark_loss_list=[] + + for batch_idx,(image,(gt_label,gt_bbox,gt_landmark))in enumerate(train_data): + + im_tensor = [ image_tools.convert_image_to_tensor(image[i,:,:,:]) for i in range(image.shape[0]) ] + im_tensor = torch.stack(im_tensor) + + im_tensor = Variable(im_tensor) + gt_label = Variable(torch.from_numpy(gt_label).float()) + + gt_bbox = Variable(torch.from_numpy(gt_bbox).float()) + # gt_landmark = Variable(torch.from_numpy(gt_landmark).float()) + + if use_cuda: + im_tensor = im_tensor.cuda() + gt_label = gt_label.cuda() + gt_bbox = gt_bbox.cuda() + # gt_landmark = gt_landmark.cuda() + + cls_pred, box_offset_pred = net(im_tensor) + # all_loss, cls_loss, offset_loss = lossfn.loss(gt_label=label_y,gt_offset=bbox_y, pred_label=cls_pred, pred_offset=box_offset_pred) + + 
cls_loss = lossfn.cls_loss(gt_label,cls_pred) + box_offset_loss = lossfn.box_loss(gt_label,gt_bbox,box_offset_pred) + # landmark_loss = lossfn.landmark_loss(gt_label,gt_landmark,landmark_offset_pred) + + all_loss = cls_loss*1.0+box_offset_loss*0.5 + + if batch_idx%frequent==0: + accuracy=compute_accuracy(cls_pred,gt_label) + + show1 = accuracy.data.tolist()[0] + show2 = cls_loss.data.tolist()[0] + show3 = box_offset_loss.data.tolist()[0] + show5 = all_loss.data.tolist()[0] + + print "%s : Epoch: %d, Step: %d, accuracy: %s, det loss: %s, bbox loss: %s, all_loss: %s, lr:%s "%(datetime.datetime.now(),cur_epoch,batch_idx, show1,show2,show3,show5,base_lr) + accuracy_list.append(accuracy) + cls_loss_list.append(cls_loss) + bbox_loss_list.append(box_offset_loss) + + optimizer.zero_grad() + all_loss.backward() + optimizer.step() + + + accuracy_avg = torch.mean(torch.cat(accuracy_list)) + cls_loss_avg = torch.mean(torch.cat(cls_loss_list)) + bbox_loss_avg = torch.mean(torch.cat(bbox_loss_list)) + # landmark_loss_avg = torch.mean(torch.cat(landmark_loss_list)) + + show6 = accuracy_avg.data.tolist()[0] + show7 = cls_loss_avg.data.tolist()[0] + show8 = bbox_loss_avg.data.tolist()[0] + + print "Epoch: %d, accuracy: %s, cls loss: %s, bbox loss: %s" % (cur_epoch, show6, show7, show8) + torch.save(net.state_dict(), os.path.join(model_store_path,"pnet_epoch_%d.pt" % cur_epoch)) + torch.save(net, os.path.join(model_store_path,"pnet_epoch_model_%d.pkl" % cur_epoch)) + + + + +def train_rnet(model_store_path, end_epoch,imdb, + batch_size,frequent=50,base_lr=0.01,use_cuda=True): + + if not os.path.exists(model_store_path): + os.makedirs(model_store_path) + + lossfn = LossFn() + net = RNet(is_train=True, use_cuda=use_cuda) + net.train() + if use_cuda: + net.cuda() + + optimizer = torch.optim.Adam(net.parameters(), lr=base_lr) + + train_data=TrainImageReader(imdb,24,batch_size,shuffle=True) + + + for cur_epoch in range(1,end_epoch+1): + train_data.reset() + accuracy_list=[] + 
cls_loss_list=[] + bbox_loss_list=[] + landmark_loss_list=[] + + for batch_idx,(image,(gt_label,gt_bbox,gt_landmark))in enumerate(train_data): + + im_tensor = [ image_tools.convert_image_to_tensor(image[i,:,:,:]) for i in range(image.shape[0]) ] + im_tensor = torch.stack(im_tensor) + + im_tensor = Variable(im_tensor) + gt_label = Variable(torch.from_numpy(gt_label).float()) + + gt_bbox = Variable(torch.from_numpy(gt_bbox).float()) + gt_landmark = Variable(torch.from_numpy(gt_landmark).float()) + + if use_cuda: + im_tensor = im_tensor.cuda() + gt_label = gt_label.cuda() + gt_bbox = gt_bbox.cuda() + gt_landmark = gt_landmark.cuda() + + cls_pred, box_offset_pred = net(im_tensor) + # all_loss, cls_loss, offset_loss = lossfn.loss(gt_label=label_y,gt_offset=bbox_y, pred_label=cls_pred, pred_offset=box_offset_pred) + + cls_loss = lossfn.cls_loss(gt_label,cls_pred) + box_offset_loss = lossfn.box_loss(gt_label,gt_bbox,box_offset_pred) + # landmark_loss = lossfn.landmark_loss(gt_label,gt_landmark,landmark_offset_pred) + + all_loss = cls_loss*1.0+box_offset_loss*0.5 + + if batch_idx%frequent==0: + accuracy=compute_accuracy(cls_pred,gt_label) + + show1 = accuracy.data.tolist()[0] + show2 = cls_loss.data.tolist()[0] + show3 = box_offset_loss.data.tolist()[0] + # show4 = landmark_loss.data.tolist()[0] + show5 = all_loss.data.tolist()[0] + + print "%s : Epoch: %d, Step: %d, accuracy: %s, det loss: %s, bbox loss: %s, all_loss: %s, lr:%s "%(datetime.datetime.now(), cur_epoch, batch_idx, show1, show2, show3, show5, base_lr) + accuracy_list.append(accuracy) + cls_loss_list.append(cls_loss) + bbox_loss_list.append(box_offset_loss) + # landmark_loss_list.append(landmark_loss) + + optimizer.zero_grad() + all_loss.backward() + optimizer.step() + + + accuracy_avg = torch.mean(torch.cat(accuracy_list)) + cls_loss_avg = torch.mean(torch.cat(cls_loss_list)) + bbox_loss_avg = torch.mean(torch.cat(bbox_loss_list)) + # landmark_loss_avg = torch.mean(torch.cat(landmark_loss_list)) + + show6 = 
accuracy_avg.data.tolist()[0] + show7 = cls_loss_avg.data.tolist()[0] + show8 = bbox_loss_avg.data.tolist()[0] + # show9 = landmark_loss_avg.data.tolist()[0] + + print "Epoch: %d, accuracy: %s, cls loss: %s, bbox loss: %s" % (cur_epoch, show6, show7, show8) + torch.save(net.state_dict(), os.path.join(model_store_path,"rnet_epoch_%d.pt" % cur_epoch)) + torch.save(net, os.path.join(model_store_path,"rnet_epoch_model_%d.pkl" % cur_epoch)) + + +def train_onet(model_store_path, end_epoch,imdb, + batch_size,frequent=50,base_lr=0.01,use_cuda=True): + + if not os.path.exists(model_store_path): + os.makedirs(model_store_path) + + lossfn = LossFn() + net = ONet(is_train=True) + net.train() + if use_cuda: + net.cuda() + + optimizer = torch.optim.Adam(net.parameters(), lr=base_lr) + + train_data=TrainImageReader(imdb,48,batch_size,shuffle=True) + + + for cur_epoch in range(1,end_epoch+1): + train_data.reset() + accuracy_list=[] + cls_loss_list=[] + bbox_loss_list=[] + landmark_loss_list=[] + + for batch_idx,(image,(gt_label,gt_bbox,gt_landmark))in enumerate(train_data): + + im_tensor = [ image_tools.convert_image_to_tensor(image[i,:,:,:]) for i in range(image.shape[0]) ] + im_tensor = torch.stack(im_tensor) + + im_tensor = Variable(im_tensor) + gt_label = Variable(torch.from_numpy(gt_label).float()) + + gt_bbox = Variable(torch.from_numpy(gt_bbox).float()) + gt_landmark = Variable(torch.from_numpy(gt_landmark).float()) + + if use_cuda: + im_tensor = im_tensor.cuda() + gt_label = gt_label.cuda() + gt_bbox = gt_bbox.cuda() + gt_landmark = gt_landmark.cuda() + + cls_pred, box_offset_pred, landmark_offset_pred = net(im_tensor) + # all_loss, cls_loss, offset_loss = lossfn.loss(gt_label=label_y,gt_offset=bbox_y, pred_label=cls_pred, pred_offset=box_offset_pred) + + cls_loss = lossfn.cls_loss(gt_label,cls_pred) + box_offset_loss = lossfn.box_loss(gt_label,gt_bbox,box_offset_pred) + landmark_loss = lossfn.landmark_loss(gt_label,gt_landmark,landmark_offset_pred) + + all_loss = 
cls_loss*0.8+box_offset_loss*0.6+landmark_loss*1.5 + + if batch_idx%frequent==0: + accuracy=compute_accuracy(cls_pred,gt_label) + + show1 = accuracy.data.tolist()[0] + show2 = cls_loss.data.tolist()[0] + show3 = box_offset_loss.data.tolist()[0] + show4 = landmark_loss.data.tolist()[0] + show5 = all_loss.data.tolist()[0] + + print "%s : Epoch: %d, Step: %d, accuracy: %s, det loss: %s, bbox loss: %s, landmark loss: %s, all_loss: %s, lr:%s "%(datetime.datetime.now(),cur_epoch,batch_idx, show1,show2,show3,show4,show5,base_lr) + accuracy_list.append(accuracy) + cls_loss_list.append(cls_loss) + bbox_loss_list.append(box_offset_loss) + landmark_loss_list.append(landmark_loss) + + optimizer.zero_grad() + all_loss.backward() + optimizer.step() + + + accuracy_avg = torch.mean(torch.cat(accuracy_list)) + cls_loss_avg = torch.mean(torch.cat(cls_loss_list)) + bbox_loss_avg = torch.mean(torch.cat(bbox_loss_list)) + landmark_loss_avg = torch.mean(torch.cat(landmark_loss_list)) + + show6 = accuracy_avg.data.tolist()[0] + show7 = cls_loss_avg.data.tolist()[0] + show8 = bbox_loss_avg.data.tolist()[0] + show9 = landmark_loss_avg.data.tolist()[0] + + print "Epoch: %d, accuracy: %s, cls loss: %s, bbox loss: %s, landmark loss: %s " % (cur_epoch, show6, show7, show8, show9) + torch.save(net.state_dict(), os.path.join(model_store_path,"onet_epoch_%d.pt" % cur_epoch)) + torch.save(net, os.path.join(model_store_path,"onet_epoch_model_%d.pkl" % cur_epoch)) + diff --git a/src/train_net/train_o_net.py b/src/train_net/train_o_net.py new file mode 100644 index 0000000..e45105c --- /dev/null +++ b/src/train_net/train_o_net.py @@ -0,0 +1,50 @@ +import argparse +import sys +from core.imagedb import ImageDB +import train as train +import config +import os + + + +def train_net(annotation_file, model_store_path, + end_epoch=16, frequent=200, lr=0.01, batch_size=128, use_cuda=False): + + imagedb = ImageDB(annotation_file) + gt_imdb = imagedb.load_imdb() + gt_imdb = 
imagedb.append_flipped_images(gt_imdb) + + train.train_onet(model_store_path=model_store_path, end_epoch=end_epoch, imdb=gt_imdb, batch_size=batch_size, frequent=frequent, base_lr=lr, use_cuda=use_cuda) + +def parse_args(): + parser = argparse.ArgumentParser(description='Train ONet', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + + parser.add_argument('--anno_file', dest='annotation_file', + default=os.path.join(config.ANNO_STORE_DIR,config.ONET_TRAIN_IMGLIST_FILENAME), help='training data annotation file', type=str) + parser.add_argument('--model_path', dest='model_store_path', help='training model store directory', + default=config.MODEL_STORE_DIR, type=str) + parser.add_argument('--end_epoch', dest='end_epoch', help='end epoch of training', + default=config.END_EPOCH, type=int) + parser.add_argument('--frequent', dest='frequent', help='frequency of logging', + default=200, type=int) + parser.add_argument('--lr', dest='lr', help='learning rate', + default=0.002, type=float) + parser.add_argument('--batch_size', dest='batch_size', help='train batch size', + default=1000, type=int) + parser.add_argument('--gpu', dest='use_cuda', help='train with gpu', + default=config.USE_CUDA, type=bool) + parser.add_argument('--prefix_path', dest='', help='training data annotation images prefix root path', type=str) + + args = parser.parse_args() + return args + +if __name__ == '__main__': + args = parse_args() + print 'train ONet argument:' + print args + + + train_net(annotation_file=args.annotation_file, model_store_path=args.model_store_path, + end_epoch=args.end_epoch, frequent=args.frequent, lr=args.lr, batch_size=args.batch_size, use_cuda=args.use_cuda) diff --git a/src/train_net/train_p_net.py b/src/train_net/train_p_net.py new file mode 100644 index 0000000..bd69155 --- /dev/null +++ b/src/train_net/train_p_net.py @@ -0,0 +1,49 @@ +import argparse +import sys +from core.imagedb import ImageDB +from train import train_pnet +import config +import os + + + 
+def train_net(annotation_file, model_store_path, + end_epoch=16, frequent=200, lr=0.01, batch_size=128, use_cuda=False): + + imagedb = ImageDB(annotation_file) + gt_imdb = imagedb.load_imdb() + gt_imdb = imagedb.append_flipped_images(gt_imdb) + + train_pnet(model_store_path=model_store_path, end_epoch=end_epoch, imdb=gt_imdb, batch_size=batch_size, frequent=frequent, base_lr=lr, use_cuda=use_cuda) + +def parse_args(): + parser = argparse.ArgumentParser(description='Train PNet', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + + parser.add_argument('--anno_file', dest='annotation_file', + default=os.path.join(config.ANNO_STORE_DIR,config.PNET_TRAIN_IMGLIST_FILENAME), help='training data annotation file', type=str) + parser.add_argument('--model_path', dest='model_store_path', help='training model store directory', + default=config.MODEL_STORE_DIR, type=str) + parser.add_argument('--end_epoch', dest='end_epoch', help='end epoch of training', + default=config.END_EPOCH, type=int) + parser.add_argument('--frequent', dest='frequent', help='frequency of logging', + default=200, type=int) + parser.add_argument('--lr', dest='lr', help='learning rate', + default=config.TRAIN_LR, type=float) + parser.add_argument('--batch_size', dest='batch_size', help='train batch size', + default=config.TRAIN_BATCH_SIZE, type=int) + parser.add_argument('--gpu', dest='use_cuda', help='train with gpu', + default=config.USE_CUDA, type=bool) + parser.add_argument('--prefix_path', dest='', help='training data annotation images prefix root path', type=str) + + args = parser.parse_args() + return args + +if __name__ == '__main__': + args = parse_args() + print 'train Pnet argument:' + print args + + train_net(annotation_file=args.annotation_file, model_store_path=args.model_store_path, + end_epoch=args.end_epoch, frequent=args.frequent, lr=args.lr, batch_size=args.batch_size, use_cuda=args.use_cuda) diff --git a/src/train_net/train_r_net.py b/src/train_net/train_r_net.py new file 
mode 100644 index 0000000..a375639 --- /dev/null +++ b/src/train_net/train_r_net.py @@ -0,0 +1,50 @@ +import argparse +import sys +from core.imagedb import ImageDB +import train as train +import config +import os + + + +def train_net(annotation_file, model_store_path, + end_epoch=16, frequent=200, lr=0.01, batch_size=128, use_cuda=False): + + imagedb = ImageDB(annotation_file) + gt_imdb = imagedb.load_imdb() + gt_imdb = imagedb.append_flipped_images(gt_imdb) + + train.train_rnet(model_store_path=model_store_path, end_epoch=end_epoch, imdb=gt_imdb, batch_size=batch_size, frequent=frequent, base_lr=lr, use_cuda=use_cuda) + +def parse_args(): + parser = argparse.ArgumentParser(description='Train RNet', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + + parser.add_argument('--anno_file', dest='annotation_file', + default=os.path.join(config.ANNO_STORE_DIR,config.RNET_TRAIN_IMGLIST_FILENAME), help='training data annotation file', type=str) + parser.add_argument('--model_path', dest='model_store_path', help='training model store directory', + default=config.MODEL_STORE_DIR, type=str) + parser.add_argument('--end_epoch', dest='end_epoch', help='end epoch of training', + default=config.END_EPOCH, type=int) + parser.add_argument('--frequent', dest='frequent', help='frequency of logging', + default=200, type=int) + parser.add_argument('--lr', dest='lr', help='learning rate', + default=config.TRAIN_LR, type=float) + parser.add_argument('--batch_size', dest='batch_size', help='train batch size', + default=config.TRAIN_BATCH_SIZE, type=int) + parser.add_argument('--gpu', dest='use_cuda', help='train with gpu', + default=config.USE_CUDA, type=bool) + parser.add_argument('--prefix_path', dest='', help='training data annotation images prefix root path', type=str) + + args = parser.parse_args() + return args + +if __name__ == '__main__': + args = parse_args() + print 'train Rnet argument:' + print args + + + train_net(annotation_file=args.annotation_file, 
model_store_path=args.model_store_path, + end_epoch=args.end_epoch, frequent=args.frequent, lr=args.lr, batch_size=args.batch_size, use_cuda=args.use_cuda) diff --git a/test.jpg b/test.jpg new file mode 100644 index 0000000000000000000000000000000000000000..309e1b93fd3945788493ca5b4f8d18da424c8ced GIT binary patch literal 77016 zcmb5WbwE^I*Ef8Ip`=5)yA-551nH8Y1StVQK*C{2i4jz!8)N_x7`jVJN~IfyZY4(? z5h=-c@Vf5%d7tsv5q*52prwf8yfwf0)SwGUUbSE~Spj)t}d00MykE$k0)wE(CA z9CQpU3^W`pOe{QH9Q93{!FS*x^aj|FEDE~PF#`gSguOLbsQ7VqR zU@;@=t9gI~TaQBtrUaCMpZ_x~fPzHj4#mG^YVKv@Rflby|Fr+-8~*nT1P8E;+P{~w ztKf0u8EFk5KqPQ_c$7nYh4M2nhTvWk#j?Y*%jACN1Kntc!1)2du zOs>cd_pq_v=KIUED%6zLks%nJSx2GrE!=G2yIP}MS`z#qCR$AjNRVnc*YkgRbQr$-XIpYtR>511D zR40tFOJOrFlQwb{ndccoH@ez4FF9g=gsgkJ-5gQ#B-%5A;OPYYs@OmjDqD4)UCGsJa$#8_>RV)@M0;!~r7c%^%xRefy* zxmeP%Tg|jj@{derSq$$Av;cN5wzjEXUMMXl*S(*Ko%>LT@5D6#9WN!guZ5{9h87SO zuSu9x2=Q#Hp~a$3G3tT)Wik!Xb`$unJwCVFBE6^&tF7?lg0k<(y6nR`JAHyl)}2i~ z)nqN~)-5vSB}hwSncYw$ZA2k{ON^{Zyf%61xPvILI(9<7$b0$vt{7hlgohSC9aV!u z4JaX9jk>-nHB~vJrB`klz6r5Ek=-sE-NpaY2o71z>;w?U#LvS?+TZi-0U}L!7q6bT zvIlg3#sEvHKTzGx%~GwIy<)39%15}n$(!ne&(QbTanm1#0gUtsopu~>5;g1;C%(z? z9@olrT~!!y3g0a>POlNQ*^tbv1t$c;2qH8VSYSVk~4_}Iv!<5p*_llF!1FRDPuyc#KBDiU1{A)q9w{64n6ms|AMsu z_!2sdILQK%{e*UMalgcb4R^$ONRT>=oE5Zh)EP{z@2IM=Peg1s?0V{!ydobyV=@kK z7M_mC7|WA=84;lh39}_! z)9{gz*w)+qI^tf+mf`SzAnEi9I6>@tyT~>d;C`I#fIir1)t2BZv8gZVYHd=#h!NT@ zb`@z?IMkc;(y&-RaCq1L+?>GR<2O23c3uBX=*jlA9vK^*y&W5DtQjr*_M~0B3u69# zj5B<3p!s3_2`RW@stg5qIE1*hYl?! 
z&6CWe3JeZElsop&_t$fuFH~-sL6TAhYl~MV9z)*Eb{9lyY&2d7Mpiz(R{%@CC^%aV&nQZ*b`~O2d~|TSOoAebVs0`pK(1#tJBhR0z6{_3heyY8Os45>sJHCsDSC=1YQ6r zjEbI=apEw8%T;h6T*>9PO<4_;g9AFN38Jz(ye~D|Lz^=^Nq0&8Mvs-(U6EG+tRtCa zt?flertxvLsjD#Wkl*V)so$lSN6Jc7Kb@F9`!oGH41LasGP~GP+WQUcoa@^zvPG4h z#d+RQ=#=ZAGjCy>HrQ#vTNDP{zjidS7RpBxWQKaRVmc4tzx{Nvk1fRU3h)Mxle`PP zerUg4LyidYDkZ&#uQ^s9+vPq*g$RPolc6h~|$LGFFu{sB9Ph)Chi%`-sX0W>=gY#zDofKMzjFraP0c!TF{Q0Mz0;VIH#Uyy z?L0U4%h{o^RnI)*z37$H+ojf}HJiX759?3gn*h8S7(E66_i7N`KD6z_>HF-4=}7nk z(w(>qJxEaDOyuDd9P7tgHjYS|FrwT)eG|gJ7aZs%%6j?q=nnP#7^BU|n=U_f^{e3H z&RaYMwJKMm%4;3h7n*E$6tna;2CJ+oQ}?5rM{mG1B3F{1%!x;TVL4)0L?2RJRqLWb?#bUf}X8+XY$4k9p zb?H$xYhxSOFVskEvMg-TlXVE$_A+NG1P61TVP2+VUPK=LWqRp5@5QKPN=E_t(K~}G zx#pm3xnFPSS_E&%mK{?QE^kR8PEv_ruEwzZM(A)1<A<6QO z^;nt;`Cnl(1j1JUW0=jWeKld5_+~a7A>ZQ1)a78()t!N2o4wsjRd6lYa)LCt+Q*bvE=whkSjuPO?wuKF68m&a_gZBT&%~N>jtQJa(IvDqHS+?D?$TB8QR=ZWN6L|yi8;c6f_M&Q)dYuha^TN=y1%#I`+eJ{7%sp;(YfPHNN26ebQXEBC`q|c4CWR+cSelDteFBrKJ8z?2d-^})c%a!e!AVY zWKRL#YcU>S+YA2)^a#gaHm?qFtHGBcVG3}=a@F*!cic9FyEfR%tLde%?XH`GJ%wf! 
zs_aBNgNHv>{jE8RTwculqg(wkb(ZXT`?r{?&(izm_IyPl#r9fw@91~ukap{p1NVJb zxwt=E=our@@#9ke&?WEEq^>-(K9<@K4j(uI3$?HLEzS%akqcpO?d;Wxx9#6<#9A?Q z!Nb`8_S@hiKbx$NCH@(P_;vl@e)=P9zYI&q#O<5!AiJ!PUHs=c&oI~q1=nK~TRe-0 zwITxUZm^3lp1t05*}f*?`pedv9+E@q|4oZ z{yqT8CTY5j%1`@alx@DO8?#ln3DV%PgVi3^(feZ`>v27;U!n^JB(QwJETJj&Y6ClV zM8!OvL_eFPCR!iM%x+cDwcg?=Jr*zv`C5QCsP;^qAV)YBYe!+U%i>Pue%yQ))a<+l6J1l?f{`x_W* z*05{^?(TAOcrfzcL44U5Q*leg$HNO9g8?^zi$8CCF?T%q~hC)>tm zH%obAd%^nbue5#|Tmi^A6Ue74AbGSt;?S4xlW-f8Zhh!QqEX@cWnt2hpF(5v zo=|^C-iW>XbJq=_cZ?;HLm4UGrwysyFrF5@9=TV5rQ71jtE^-hYn`OQWZ{&}%`}Wf zv}8q~fAC%Fy+?d9`o(+Ev92{=^>S!t3U00ub{}Y_my8sK@>UsJUlw`u1q{OSlg)o{ zCiw~q@f(%!U%pXJnl9+_*k7%fV>m%{P=;EIgyRUJ9~UE_@kQZz6t3wRg8qMK-FaN{ z1`$V+Ru8(&P1X*K8cs7V1qVe%XK2eS8@JCg>QcX({-%YXJYF3&cq@~oJE(Ud{bjSC zs5@<_-7jE54hOl^7_UVM?Ql%REG0QLV6XqG2D5PM+BaG*fZ^c46 zxpv@S**+&!QRMk%`$><wP;eOrJ70{^}O<3H@<-g}lldQGp zRQArQIv$xpTd`60eDQq*{+Q^CdJ_JU9od4NPy%&Z?NQ^gOJV{+=Yq8>AZ4Y0OXX1F zegmG+ek@nXsh(V>?$}UZQgq~#5#BdpW3vgfR9#*qly=4Y`$l&*QjFHkhh5GQF7t+h z+XMM++RENK3J(`a3j*Z7evZd2yEP|j{dI^a=&=uVEV8`gg{<$R=KW4+Bk_y!gpCj# zZ)*xtdzYkXdQ??kR@F;*WYK#6`HfpNkd+A6^Q8G|Fol%R3l*(SBu*1Zh5g#;yT%h< zx%QfhzvGwN>E1z>!;rivtNLP;dr~>4rh8Q zq#tK5#{+6E3RUpiT`zJstL7a6H|#Nd_!4!^ba=H;UrYLF9C3;oeG#106rVT(qTtC_q#wYC!~_KC2WL#}=8`-fEt@>}2S?+t{Smhk(0rlKPn<U<)tYe?Lz^Mh4&%r#xYcAr2Y(Fap9p<7daka{|K z1@IZJK7zbtMxyd+6Ifx?e$9|>1*O76*e~QRa@in2cYDsG@ffUiD&l=Z2eR>j4dptp zW zI^`eLte;LUIor`H_Uv6AUt4-7MM0>c0({>$}$v$~m7i z+8!j>^v1K2`=*${VD5u)`W&NUBO!&q55#eGZqrdqxKNQ&%<&iER$JYOr6pi35VMnA zv&!#FbDXQ-Ij}itPL?d%uBHo#v2gIXxUo(Er#K!?ZZybTwoPW!30O#*7@Grcna(&JJBGLaTU1KNz$hih+aPUObWDSbC zhVD-YaBxn5kXHATg!##$bheEx9z$07`WS#&_`TlW4*M2t6(Hlo1`MaG^ro(&7Bsj| zvdNmk`j}?LOR9x*usKeJw6XH*atxd_=nJd8xS5?%QN}Kx(Gfe)AsRiDn0*DT+nN3G zqfTjkLr-n`p;=4<;coYe!Izz~JZV2o5A`@G?@w&G(c86*ssz~j39~@IzWiP%y0}6L z!NR~l-FnwDRgcK%Ie5i>qb@voao<}avurk zy8_NNGD1qQGgZ}HzJIpJNWtn+rT>Wsf*iVZo-JB$nmBB1ts$w1LC; zLXq!x@ekprD=k`Z6!(TNbkH^0OwNZH+cEPgyuj$-Ug1KkyWU|5!TU=a^^HOdrYY#*5oG0TOAM!RihmFbqf4{L zlE``&QsyEss_ 
zQD^i(Gkg*Gvk=*;&dRuYSdTGyl{`WvC?^<5LRtTFV=MW8P4K(7f^R%lB2`)IG&#^d zBV(qty@|YtQfTZi59aSj^uF?90(#BZ=?Pij*a=1dVtJ;TTGD^xkk7LjvH=i{Hd$Kt zh~0SDQq#7C59qAy$`96P5%C3eJV7m7@BmbWSHKh0MaEq??Z({elT=4q!}m8#;xPg` zx4&wo6SjxJ^2ZZk#^#9&21}!IsX_w_aZNkBZR#xK33v8+u7IY`dq;!Cor~rgPwKfL;J_fH|@8516%en$LUwR>wzw{0Mutud}W~Cm!g_4;M4aAf%GfD0ekmz zDv~HY3Ls&O+s%OT*~bDqu}3%O>hoD6JN77(!VanucqP_P^qR(9ZOv|^ekDT7c)-{U9mFc?XDfLF<6Pf&O z!{pldsuMxh{M5Y>?L)cm`|S+* z#-CYjEuOu?5NK>H8a%^z$6zfnS&^e7;IgUo={L73ATVNB>=E3ZP`3;2Xo|2qgTJQ7 z(C-4G*1k707hrfKBV^~boscV_W??Y}v)e*K5V!UMn;Sa<0G2NRHWfBBSy$uu!>@^s z;W=$iO_I8X_6CJJtvg{Mv@}v@S}GRSp=ItPr!%8xDv(-2o}<8%Eldn=spRvEtMGJ#}H_q`YsZ z?VnM6?PA;f{hz(HMa^2X9)b$fE;`oH`QJxt`dRtvvCJiraEG!JR1aM>aN>OHlbECS zY@xF>oI0rk)a=W%e%I8;tA(3Tk@Qvpk%-%2((3H?LSLl$Ju{n!!I~TG8%ua&lMGEt z>rY*lH;41+Nxdl7>5yFgM!Mqo{HL!w*#+Zv;&4Y_JMReH}pBly0?T2g*hLb)%&uP|t z*DS#SeE`O$SFzJv1^Y2XJ5X)Jz}fDSzrPOvqLC!*0M2Vs61W;gCjhz$+Zk!-8ZP?A zr?ZOtbIcAG0OQ1C8(otf)_=izL|7gC4+&MS-r*S$D$dqA#_1d#e)Dl-mQXe82b_?=iOq{@-M;$|t70c0Ix9L0;Oq*Y!5`c@!&kvs-WFZN;#WSi z070l-D+TZq^LJFD010G({w)z}Qe*`3Jx_=Of9jtcdpJHj)`J7=0f`-$?s~u#fZMKu zMfmk(&SKvB`~h`X-iko013g;UCjfB7s<*?ZT3%2eyle@IbN$6Qp#Pp&sk_vD+Cb&k5GXZIT@>n;tvBvLI#Gek$EV~1vLoMn!V^_#Z87B@~Y>ia2)8 zUkfr^U7r&9Zt~}`#?6aolEqEpMc*SE42)r#kiu* zSjx;dSQz<>qN{WvVIfkLfSQh~mokxw}yXebdyNjEfHT`O~ z(eCSa3kG;t-6is6kdxvN??K^oQSn&BsS(oB#iNDh;m_!8x&=SCWMfHAJibVGGjqA( zz!9xYu~Fm7gfgUdJPthW?h zScheMviS!LW^u|Osk#>B3?Yo6&5W0v=|AtDMA@flcnBv^8B}$GK@Yg;d?R4d$vw*35S9(L*x#{EBy{X&8q5}PyjJ`d52rm8J zj#hgg8x|Y2+OSFa8 z2e)6-5j+ets=brN6cbI=R3zJ@@4?aEZc*@L9+_S%rn8|cba?Tl=fcionrK{TI*Lvp z{k`EYN^Z-^hqxX6-JX{haTm{BE0=#gT|;-AjTOJzIm;HA@D?##+zClCjq4!!Fdso` z;OOUXdzr^+$>3z_s+ttMtP@8RQp{wst*>QV1R+W_6!mL}E*Xp3%}F_^_Q^DAG%(Of z-BHd`1(!cJh`${R35IXZD@N!b&&UUY;jiAPKYSk?Rs7-GY?dEqOf&~f z7eEQG*_Cgi3U`apa zdTCbeqdwY&MHgLh3$c)VOQm`Zgf$js+Vv%-Nm>$F{IVguTlmIKV#U{5*-R_4KuKl% zdY$^0aDmsVsmJdO_ho_0shE1&-E$9r}O?@lL z7QX$I@nJCmI~&kx0fMs${znjY$5A&NAdZ;DXUDD`*8zUHJYXdZ;56+6phQPI0E+|t 
zKSaH5*NL6k1f5{O3Wsp-4tD)ixr6l(asOT?bNW)IRsO}ZR?l^1tt<>CQQGd`_Bb|$ zuX(;Rf^<_vb#qk!Iu!tv05n+vV0Ij+ZWk?!tI)g!}n6D zNl!<77+0H5En+&Sg?xTuOcZZ!%s`3UM6Q4~9JW-pV@^RI-P7F)DegVxH~ zYNoo031@w#cW#O-w6N`NSHAm6{-Q1co*2)Ph9R>@5&e}GRZ@+c9Y^`}m&vYihx0#} z#A&{S=yTce2Io@;R79qphb&ueWGZPqW#Ou^N??1vbd)cT{V%ZQNxnN(|_X{$i+)Q)aux z%vyUhcIyDvKMPg90(9d1z1flrbBDAYl9ZW^xs!%f17k8a#8APQ}Wi~LzSBrCIkyE;Ej&w(GZz~f|U zAwL%T%MN!D8+ujynGO?${CHh8EWQ2pMA`zFJa7fM()L0lY?LO3DF=@5^TK&HSj8m``zbpjeY zNBMmZot#~Le+86g%KOwA#I|+*c7^swaJ|j#yLq8e9~|ny(_g`6I~yy93z(^iJH>}f zfvp%JXs0I00u=E!aPsf;wFB6LNDSeGr+2#*X5HYNMQAJect4mZy!ym}r(5CuX*vVW zm=Z3sCHEyaxEfDFl;fCCjfx&rKogLWdEea5_w$Rgc6H~cVreP>y%-$Nu?bLm2=byQ zRQe3is&N4n5A1NN|3}5IV=w@2hxIE7@soo%>L=@PROXDVhjQ$&`lj~pn6OK%Xe7K7 zi!lIHtM=9!|E<1eIN)%*zZJ9&wHO@GsxWTVkhw-umk=wXnUXc;ls3W{C2<=c^(!D<664rp z%lkVzt;Hy+_35r9qeSI*lrH)sBtRnm_7al1ZR(6Q+uHYC)U%0k^Ny1I(JF|kbGuc) zBk2ryLf-Y!KIOst+*rNai-~Tbl;;D6>%Y_4x_12Itm!<8vYM`dpU^yWlAUdZ73b%ukm{X{(Gf_hNU5xo~SX<)pbd==)HG^pq#&sfX#(zKW*(KA4`9k z<8*py)tN==4IO9}60;yR+17xZ-|pSsm@H~P+J=oMM`ZGUEY7&(G%fv6Yh=Z3B2#ub ze#%J&%@2^TnK~Rh;Z0v>G{|&K!6fMx<3iHkW|dd;&p?(<<&`3+4@^(wc@QR-%G)W= z?r*X|1-_*&w#cLiJJYUX{`4cMzN*(}7A+cGT)Us@rLN!E-8HgYK74InA4dgoNz5j< zf=2Y)^^U~~`ZLXOCLi-*9NVIef^{ULtu0)C629r3oj@a6?Obqa1V1HxEO1CGEH!J8 z^z=?MF!oDs<&8p_#h{yoGUTFbsK1nJ(-ggJy@9fh!SEV+l35ZobPb>-JVMM_Casx$&I%q`_#;Y%oY`F51ov5-PMX+iV2DO zYZ+AS@avnLH0s;fZB_99&hG02uP8`M&rY2K4ge&$6OS6}=sTnPVNIxX$P45tFzt`y zd&DyTxlaw3dvf*B-SHw#Il21pAQmGtHr#HRy#*m2;6)7F0u+Qdpk##=hefro>00%$ zTJ)Fy=kT?PI~Jioz$-+yJ(0ShID}_7&Ym-c$1ypWgohBB#qltU%~9ieEcroklCxCD zAN|jg_w^AWH2>Cv?6BzO-tDURD{2Cp(vnV)gPV?jg@IL?K{Uc#d)kJR z6x{Ra$S>3JBt(eQ8wKCv`|}5Q<2OIAC%QFg^05}kXy5g!^=mh{ZMzv96_%s?BsP$@ zZbP(~rcTbH;fv-sdjug=fO)#&PL|%Ud4#s*DpHqH)1bRvbvzX?z73kssqJtDX>k{b+m4sa{z{ z@iqM{Ub^TCc>XT^jnmDQ<9FC?Nk8{Z3@uph9EgOcm&cWcrnTQNlNt+oej-_VfO3`9 zmrK5AqfCpqFq|DV>)3ZRaj{$si`%vWa9CTYrA1&{GYy5s%zCL{kPou0@>e8)O7TPXU1~ZP)_XIv=WjQGR ztY{KUuMGX+#Au81)cwtOIjyXDn|$Pqh2m@DnL(g(_K{qPb{8tiisMwQh>p=L>4{IU 
z30-xrtV!VppKq`El;$e_1x*N9*~QIEvM1V_!^!5gt8)Vw(#vs83GOY3m=mnctP#4s z>BYjYKfO4iRouqSb3@i7j$yipeE#tfE_@0V@q=(cdUc?$_yyFhhE9J^RwzRoiC0qrBOx)Yzh$>Uk6{sYds;#5o`n}aj{A5|E9 zSm;2=y@gDoV0IjmZYS1oO3hu70_#aeM`jDXNIu|zJ=S-xdm&9JtZ?;%3f_eO`M;uC}1J{>?46($NI*`yZuwO+C!JM3ZA@p6M7E z?EY2f>yf|yXK@DL6j(9DrSug^2u!C6Sr;6H^_X!0VmR0Z+KFAeCfN%Fx=0f`AG zx%dG`bNY==f_nr8?*u;cZ^xK8G|=y~@!gG@%zmqjx-^J{K9W2Xe}+gF2(+k4Mn}b5 z-z zng0IrO&;~_;?k){H>8rIS1|?eL-Zd^)l2K{1hq0-20uMG4NlON_7i_sSYf?T*m;09 z)GVBxUy&)}eTMoaFjbsSSlrd#%cD0@IJ6d7m7tgn^VG=dUaO*#L7QdY%NMZL3@`g^ z;~3}Hv*a@~?j4nbwp|=SUjb<&j3FiG=`VM>%dxvxk2DoJ%=;%RuK-iFm)^P=OpXOJ z>ud$ME$5M^6fCfg+d;%vfSEEstV4jvreQx)(enGurHS(L%lee!iqi~ak`8yfE0sg=7K4qNcS6i(5sl}Zp^GLR0zpJor#zQm)ywO%`nKO!S?bPZ zo!U6U?w`+H0S)ovq((1~j8BVB3&uEhDlR99uI+`ursNecudE>v$~}*C_>hA9?ODK} zD)x#=`$5ad6%Y~f36Hs_v8zEUD>roBSwk=c>M;|~IVpMaGPRTclP3gGh!XZ`@ysnX z+ty9jbu1qKq?MKfUEZwZqw^B4lPqj*a!)t0qTa>#HrVvw(@V@$am*v)%?ysZAM^sD zTk|kD)`VJEVw@(%{*0z^cCMRfxzl-L<-K4?PtB`S#y=t5%$E;uL!^wy7423yAAR%J^=v-zmm?NuLmangjznt9ou>yu0|*rzE_v{!slO=0iD9CiA_XCLCVf zW@E-R_}BI<7DgZctY7|eu;QrkT;xMFt>OW#*X|OJQ8+!-2NdJaR`I_`d&H<*Pirjp z-w7_pS;q%(?_Wbm*WR)n0LOoMhu#lY3C`_%-z>7PS>7d31@!>HhL$*A2yYQxXb=nm zofChf)W1Uh?*lz{h38B`rY2JW^6d})RqMY#_kZ2*8`#Jei=AR$!C{ernYxyh@nA;W z)wicpm3$95;UT--sATlbU9`Hcz{gZZ+1{;H5dnP-(Yhe3>5p8RHu*G93aeedvT43i zwFOyST3rDY#i-OwUtfuQ4Pxu{VQZ^4&+)Hs z?>QLp8Q9`AK2!LleZ;>)dB$6qdSu+Z%wTdUjUK~M$iE7smJ)rAP!se05lj}a)&TvJZ#*LDOqTFVOe-;0%KFDk$O<&}OtV zF%nkQwW;)vnV6t_c~CZ#nwLG7d|p*;as{Ye0ey)sJ>{o4boY%pbyr~zIfBJ3wLW6 zjM7c=+MgGASWR1QVsxXRAL-Z%GTS90FD32wa-l`wm}t7yR)(z-5toR$kQr+;Hwdo4 zN$+9G+xBl~aud~C^6Rv_dRcot(kFDt0D9q&#l$86>%yT5*w~;Rj^-&#vyW7&r7|8C{YAvHBm24*$aFlP}Y{#^_(QIhj+F3R_}^IW2P?x)kfqmZSXRw~TZA z3l*ItQ?{0eEO&PfCVv7mflo%y?h}8RxwJk*diylbe2qS>PWR6$C#zsp(A?Zos+F2D zvDrkiG>ipf!5-X`829q@Hu#33U_cSwZosr65gp9IJ{A|MhwXKS<=W0o(tjD(6i#%oPLPBd8b+S@S5M6KSdU^!Z zi+pRm3*+7eMj6d{7PQuVKuYXI{YDmezyn70O*ci`&RCx9^-QE})m{vgd^L)g)W}Yf 
z6><|Y4an480DMq!(T48hzj-iFSD~KTdY7IXzGkw^559dq!O`cxv=CjzLj5^1-xR95HjwrmHUMdx*vzbD6Stoa27rk986v7175 zYCdFMI4Y@iix(TtnkY|wUaz7ytDw0k&;(y-I&Ysb=QMq{O+Vs!usvCHo9015n)kq~ z+mg4ZE=%Lz9Ld3kd$l3Dg@$6t0$Gpb)~ChaH`8Z*l*g`seX8@E>h;XAIq#-KHx`jn zO@r(P8-4GF4C7ou;or4OlO-@^lr9zs>AX;8{*_f!3{7s8gAH5(Z*-5C6MW14PF&k8 zr(MtJHAyD(KJ+^t!lnaVJ!glXlr*;JY(c_`3`C>4qX;WQa70@L>AaGCS7G~0Gb-Xn zA9o$x42G6zjr+6A&}Bji;<1&Jp1Aepv35T#+K+j4Da~=H2D{q$tpl~=OIlfAUq5Vl zF;5nzL!^pOI|%E!oJkZ(%B?@QpVJ*2;3jW_qdIO)+z}*gLNrS=QB6I1^`d78vJe#6 zRL7od@d_0%TW}z!UFUn{gGbw`n8o}qA zIYu{EL*oz=brx(Mc~{oZGGEl znU=U8{U2Od?)fLVgoPVyv#^Oia%_3~snj*!cm35AWBu+9%FJzIXctvJrh)LZ*cK+2 z3vW;xi^q2}qG@6FKJW3J`g$aTemES87c;Rf;C0_}GqDe?-(4~5NP0xU)KkARwm|1r zC(z&gcro-e6l%*(LVqV1px3PGR0@w*w&QNG&0tF+irCa#C#w@Ujn&&ii_^9|3DMJ@ zs@k?)tWaLhb?un!n~=%fNwn6ApgNIv8G{{4$aU{69(%=^j3$AcM;E9!r?Qyr$$bI+M6uqcb?5BU}3YQ$;k zo^w*oFZBz4W$#2)Q^6ili=jXoX0Ii-{~!>VqC03W z2e(Ej+BDlW&XmV5$-%u6z3N_c5Zr62{pf8#l9gTN2aKL|Ak}nFVP^mR)cKlhYkiCH z+2k0m*Ukko&ho7oiz7Lwyfq%+QE@V8ru`%;;S=ktCJj z?pXCoT|ZhmP=Sn8mA7@A5NXClhqq&n1;dU5!c$fQ;@Mna8X%wWE#y_0aVtT%IQ-h-r%W3y_%>H1hEu%H98T>P+e zK2M-#cJcX6P~wBybllo>JPqi2)%d&h!K55yl$Wj1N}=t9jLUu?qvwE1=@VZjY`~Uj z7tvoP9`gQ%m#Re0fBvJuUt<&%$#V}Itr^q9LC8-(yw78NTL4D!x;XPaD@!+koXZXl z;C}(DQ89vD1$V#4##s`gtpEa$7hu0?Z^!zXiUjoA?v5b#U9A5vVK=VO1Fv}}Hj>bq zuM%#NWSf83VX>h-A2yi#C>cX^li##tr-<)J&gI12Wm8}6yl&D2VVl9ezw$FN+l{&Y zTq-3P3cUgdcT5CSZ);?k&xV)>=>EaOtwr{U6&>`%KPxI z&Ty9&Eio54wFNTt3{Ab*`%>FG{xSgEo&*VV_fo8Q6x!)h2R4f-yRkRp^vJ>{S=#UU z5VGoqh=Wl&1ql(w3@2W{5%$er`HU(fh(C`jfjra>{j;ROOK`%CaO9n!&@rV@>8p+R zrBypgPW2)pWTjP~#m6_}3R!0)$AT?gf1SM;)zIHARm<~}2=n}G6D%e+>6HE8jB)38 zZUx(OxWvk9z90cFZ(+(wUG!c{^wHoIur|!5#l&;K9cZ${bN6A*rg0l))Qs%FG-f)V zfH~#8bYI^DGpWR{hvi*+&VF<-TcQvy9pMk#BFSNmo7twWtx`V^nUkRYNdpp{1%KWg zJlfIBByf#hKfu11GWE(dn1II6mcYmGZm)TY4J66@_C3xhiNO&>J+iG&vb3WFo4vuS z8VKbcEb1~k&<>(HQ6Ybc{h0_qfGbgGCR}i^K|nNgsd30yDExwH3W5*vK}rCJd)zbU(j4qJ=FD6L@Wi~7OUUN$?$%at8N zUXDt+X@%U?M@$$8hqFc(E+GC(FoSEq+iqF#dhHoz*-h}RxNzS`Fd(MMucM+)_pZTy8s?M(4lPThf3 
zHY^&EaXs`WvxG}GW-&@{fyu@(r9y&7CY@1siYN3-!A&yRzf>fcKpVt8xaq-e_-CWF zQzk#;C5M37{8V*DRcHy164^SvgBVClJ1~ExnOBBVmBDNc;uQIj*weL}bvO8PAHP+L zxAG{4sO=m5$`xnAW`sD_TY5G%@FT8yzesj6yc!Q*sa9 z6Lpp7zZ8GlX9m*cv14`u3;N^?yL`2xWtY-%3!_j27xP_Eb<@9zZILjEjeAg(nz$rV zzH0lF`F7Oy=ZJN`DTYQyBDvCw3jsCp6CX-xTPsOfr=UL6e>}&SX|5R*#;$DS(#C*l z*s6hJ%%z-yVLjvW&!Ud4OxRBzIrH3EKBDwAV3y;$(v`*Y3Hd^)om%^~yR)OJr;nV2 zAHJga#ZkLTpvVcnD<-$D90ct45TW0{ zNl7%i_aPD^Y|NK@*>&`w_?b7_1X{`aE$9_L{%FM#7MpaR!S5cv$D#W*!F_5)znf0LX9#2u(1%iv;y(`&Pm6 z?u2o)idgy3i?cTCoaWKFqvs#9VZ|(MuDjOKzl~BsX_PB@95)8g{m@JMzO-%WYHj>) zNXjZ-@zN+p>2~rI!&cXhTu2e%qMF}7*{wGMU--P@jcn8{6`Xn<{QADH-UD7@ntS8? z;6iy-y+0xEa)IPgf9Vd@gi_-18cL1pORjr+j@s#jhyGjTFZ2~}4G0D@XbD3y;~2&9 zspfoJs&TQ4E-%Z|>}hDQdfr^(Q5DjiFXjVM{hVASTRE)l(b5NDh-RC~yw1s@cp>At zvn__nTQ49X^(Bnh%#*CdWKn3mis{7W($z*+Zo{s~XHMui?ms~IKLxrL)rDmJ{Vwrh zu$TeLRAI@3htG-QWy;x$KjppWae&EP)v~1J?Ih%}*PeAs72yo6gL%nfFreIP>#oB5 z?84vBJ8tmALM0{Md11kj%%_WJ7$%wSAQcRi(Aw{aPp2$d#5u69M^mL84E==?X$98n z3hHbEZKLQM0am66oQuz30n1w z%l71X2QW^JKd(O)rLe`D{ayYpWd|%^j#6nt%{Uk&GWGoRd!%vVvg8;h5G5(sE=2w?8hIF zctS27{D{~hykQ3DI&?oc0b~f`BZZVJY8u1Ld*hY*v>t4J6KT!Latud76nYF_T+dGN zv}O-Rfay+un&B(tLgxG;k9*K=PmiHtG9ng)p4;k!Vp`tLmI{&aHQh~@7i`spAMCT0 zl&q-ZkD!&xv{QHtG#v4^H`$v_=u+PvI6Gs2n=Seek)!#Pjmu^!t__`ko&1~19NcbP zTWr+Q`)=sPnE6h6%J@~YEct5l6UKL&AHA+=OWi9=2Z)c3b#m$k3zpp?yQc)H9ksq- z(^GAEDz@5giDHdX)4z$d**ftDYb3BvI@F7a&(ot-%eG7ARBc=$n z`2alNbz0uYR|l`;_Qh9V+oVl1UPT>(9aVE}A9E?I$($&a3OWOY)3c7)$|;TZ94>u( zV=5BWbkmp&7KqUm$j7aL9`P1Z&;kw+<0hv7&`YE+QI=fCua?_9?0yyikkv{Ob3z6R zPbMm+oS4Xba1WSU=sJpcyancpCSXAV{VcZW6wx-XWVkEovZvSaRa!)kSU$d{erNyu zdC+z=z~O%&hiAzs()0uX_j0S4V(K--&gn88#g;DbrR-9TBrcjhDML7c-z;VLLQhNt zWXHW#e?=B*&~v`lYG`$4g)!?E#(>rZ-6^`^+1RArT}JEo%O>lwej zI^W5Y4tCap%#MpxX*a?*-WiJ(w$I~h% z**)vzOVzQrleI9p^VHpcN-{u<+{*YDSu^S=46j)$d(`{6GbJ-C$vf>UF?7DdkNCnL z$$ZxMUjSE- zW?4mJp`@u9iTT%jj1TRJ%n8K{7u|ygT1qM+S3O^TATYgrl0wZ(1#eAD_37C`q}5ek zBRs#`!&ww`AN{!b?BjrdE+&%5oYi;GmXBc?w>GfK{%(Z9PR?zTIKDp6d^^gcZ??Oa 
z(8jCJC6y)`(9+VF3XIp|Qb`Df?KnZV^^Barq3+p}RT|Q*am%~5+1?KMm^1?vF>eS9 z;&3dASmzy@dY=a!BH>|Hk;0qDiAgMqTU?+Nx*TM0Y1uPaJT^OvPjl$=1$hP;J$R6q zBk_ZO5Lp_HaA{fT%xELhT`ku!3U+fAd?uZ4$D0!27xzt{8!s))7+D_v2N-Plxs_SF zDJ3aW>coWN@lRV9+#7YIONp$*lMLn9TwhM8R*DWaEt^JM*Sk~#`%0EqVo{B*U4%e* zD{h6vc$r%rqPH>KyG?3-FUzYEE^W;++nX)hnO*qrCf@XKXyQErj2G;Dg8 zI3GaW1C(SG8cM?F(y|>4-E@*CE~Oe)v*v22a$J@QOH6fwowg39ap>vNI9uw%na!x=`;+V9si#XPq6j;THvx_K9M>Sf+_7wI1g zz&8o74c~lNS&uzUxs)T`FXTw`4|yqoF*xiqY7sV(`W^3e68lXoB~G{2rJ5)|rDI?s zfd&&u();GabOwR!hbS#Wu?hoN^xtMt^m6}?js5A;b2A?hoA_pUhkvQs0G#xKA>FL8)+U4=xE zWPupiK*$@PFgl!Keag+h{HQ=>VtShuo(=lUA&*Q$^sv`h3|1Ax5?X*EQJz$T4f$(X z#nuJZkAJb697^sMLfz@REZ%e8z4i|#blo#rC4R#H7{ZslT1zYr&!X3MDx&DP{isFO z5x;Uw&}+MsFJjpwVLM9K#s6rYkG$xH)M=-eWbuL`vo^1!6)B-Dejg>%{`f*Pb?lzF zjh^i)tAptXg}u}O8M|_JJs2sT^$^}`C?0jF!;etvP;2#3Rs~+Z%H*6A&SWRo=0a-{ z1?9f(J$1VWuLMI=_?2=WG8{rUTY3?Od! zF;9AD*P$|WYyh9=N`|G^*T-O9FNp@0y~|E88QU$v*^3#W9Pe z_S6W-?$9(kpFL2R>E z&t6{+ez#Fqtv$GWSOu$;DIQlaBgt8eTi!o$IGR%o*_u?lW`6XLNg?UmNvTXAlWlY$ zFbx%oO>*nAXj2Vc950Ay*c2RFe`z?<&|JKEV^D0m$K$KwT-|%K9+AXMT+$Qe;DfTK$l7{00Mp6w~Cewm{h&)jWptA9FC?P|A z1ZG=*BNys#6Ju;_ZhrMOp{{sH#isWIDAm!;nW4#O`Nv|q)b15cNp z)LS{8q~dT1$Z+w}i?P+&{>gs8Sn&;1sr}ehL!PUxWK%kJu{^=u(?n}}gDkVva;CzG zv+c9T=*j})_2X65OhK!jei{jR5;Tyw$@RUmk9y36^lDt-c(EBOUldzl(p#gK4f8O3)Tx1|ib9X!(G4 zEkVq(H2M5ymE4HG!?Lvw@C$(j%ETfm!2tBClT0HGIe-HL= z{$GA;uasRdM&=4CZ$+v>Fvo{4uh$L*NQBL+L#6Q5Hiv z==fugyk;4meeIF(!AhJYgj(O$J<{~qxyp;iX+(9d+ss3-@pIj%&!zi}oViOZSY7a| zsFHubYA3-mW4Hw=yk?Wvx$973DyIogxsVYH@YQt-GPKKtz+5LOjr)hDPr> zfoKDYGOswb3%|stg`h#=A~hRi@r_evBC@!YRW82v&M&j)Knkn`PbU+#r3K!|m#?7N z+1o#h|DPyRC}e0Yz>7@mUfQ8bb{w|b=Rj<~fSzul8Cd(aH0D1*NvBpoT_+;+NH!QK z1uvDv_hMT8W2CXEgZ$Ncx)(s#u!hrd*H|{=KZc@;KITGiJz|Ztq&rJWiZJ-Bs@FXS z#*#ZyGcCEv`aQqaP@_wkbrV>g+mHPCSyTWCXBCTKfrOH)r~!d%@>^#t@|!(JZ|^Tl zN663*T{u2bVd7H+M1SL(f$Zn!x)5^u)u-bmjva+3E5{GN<)xX-r=|GT@|c`Dd_=Fu zNDb-j`791>o(xgM=jG};AzOY<;kv)6|8X}s1g2+;8<`SSQkq-|5+C#2u=JXoJOZ=# zKRMoVF-a62?apX@pkM1iTzQWtkWO7Lb|5W*W51Rv#f0u*oEpClCui=SNv65REg&xN 
z8IQt@<=eT5p=N>uvG@N(Z%5mmCIJ%Kk_*gg+Gba=^i!LAA~#{)PKq=yPhpJ zT530u3;f0_V&8M>Ju2fqv6W2Onh@h5PWH0wXZA;&G&5DXKJT{}R3wE3>meT?O6!k? zpI+^|SPz(qvH_#lrFcv_FhU6M1Cw`8ALjRQwtfM>dc-LX+IkYBmIk|5SV)4;h%l33NTD$u z1i5M6UmgNT=kIUe9)yfQ_Kky*@d6R#H3V@7zo`e^kiDUVntS-kUN{uqR{c0Dsd>?c z>^(791hOrvBbJ!2=#rPDRO{LI=f2z4Rfab2hYGI2(t*UO) zA7@XtQuV7Y3z*bDi(s$qBdZa)Q0jHlCGZuSTlIgB24F1Y- zsZQ0@wx6l9LRqrOX@01kqs{v(iLtwfvGDJYLoMj{_RftG=ZZm<&va5p#WjJw6=u^~ z%j=n3$O^4NUt`g+pk$?O`bvlymsEY|jJb|E4y@=cvv1E@&l)71@)M^JfpzQ?@Jb$P zm^5a^Qj^d>;WV`1W|MmR({VX_MfP65VM?fuTd1X@IwbwN8hp^(ytcD#ywC7!(g9Dc zaQW{}{M-A7h%xZHt~sZ;;>GxnJ*K|4{ETVFj*J-xe|}O96H8c80FnzSjMJRO3vFOjFPsqg1HaLbd)0nX1bdFYa{bVIpu zHdPbtK--gXjaN}{zd=#p6nw1R?5WOvOUD#1onMLb-)~aZ3^@zsUbGvGcWfp)DUlpr zEw$Z_Dgj>rHrDG>ia$jhZWz7Vl`7q}56reU0CKSbIEo*+aTja*u(*WHw4keL)XY%a z5`WKsfG{~2()QGjSjEZ1S940hbjOg3V1oQfI41U47NNz`Jh9|Ff(KLwz%+A|41?t(`eE^gwjSV(ewg}6hM8am+xMW<$*R2@(XmWsj)8eq>{C|ivb~Rwo?yMS*ZAyD6DqB=a_PU4E7>=n z^4!Z-m#p}q(olCeXc+&q-0LdMBw^2q4Z2juD_n1+H#0Cpqs(5E-bQ9(bmEDf`Sxs_ zGik#7Y{FxOebs}fO2_6}pSL!*PT0`*#j+#V$Cz%qyW%wKqg;x4N_rv^NEPK7z6J|pmViG`K zq}T(%{YQxj3ZD6lVlO1Q_lyB4YUcNR7{VTDVxD0Q(9!6{f@Kn(3T@DQt?E=5DRoc& zxsi~|r2L1z(3_-7dAsjB`v*Lro66uf4|e}q=0;Z^t$Jxol3Mj!oK2(u%6fd+3t)P@ zx?w}WXUc`}_A~zL#C+36UK#voF!Wf>hzoC~WqW7Mq#DPgk(P!EPGEYg75KxVmBM7x zv*&~PwHyh~ejgOnTsv8Nca;}HDzo?ZZb7s;Hc9Aa`(J@nRrDxa?WRD}k4b=}b@*=E z<41J?;dLQQCTGacu4u2ftwnul`80@57b|(UhnS?wExnz-)1vM*FRbW2P4Y*mCaB`@ zlK@fdNo1*pyUY5SkK9XMxo!Ns>u*ZJ2hR&c&Og5D&=Gg!Rc(kr*zrA1tf5R?u`UP{ z%_y-VJhChb=renIP1FksG|orSR~E?L65|V}j0#H&{N~e=3UU^7p-vwya{MBFoXUlc zMO2cj)0d!xQ@fM?!kY?%f1rW3!Ec%VH)A;Vg=otC2S~npb#Vs*l<6Ru1U((8${9nZb1;+i?K~dR)Nu}UV;c1}Q zrKsVj@|?BZ^y_r9cblAzS#lZ(N&{hUYq3#br)(^m#_Mr(t}WbZqUMMAT~-}V5?fr7 zj{1>|rooIuj>QkXlpxPtWJI$q)@DW8>1?wRF=L*?spPpGsDFvyYB?NxPhT994GoOd z4y77^NzWfVYvQF=%78~X#ZH-(J(|KqLH-G&%}A5q&p!jnWG+uV+XL~AFz1;h25Zfk zx@}s#Qnu`)hX|x_2!1L}K2kS+O77CA$L-W2qTMw$w|vAk9d!<39x%1juc?1B{P#aV zN>Q&*{xm`B!dFl;h1oSWFI3NS7)?!VP$7!-6RAz*V&3&Plh1y} 
zp%YL0vbY!Da4tr!L3GCE(~-ob3=?Q*-t!*9s8mtg+(CS#L1za`$6)nCeJM6w-^Q)6 zn2t&P$J=?uy8$VJ^tN9mAQxbZoNqY>%Cs7}DRrt04#6(gXGTkWS);)&HCPs_>>ic( zfiQfX*hxIE;lVZ?o^>`+y#cF8pYPxewy;YTp{Nrtzm&$({{XR4tEeEVw7>KQ7Ebd& zDQAuc&?v%;IquRYe9J9dKi=EE_*_&Y_5t|)(NXt9#J)J^lMjGrB57t*i3{`feaOtA7jJNI#@Z{;M=lC%Aps8>xuMQWDYlZ^8{)z}McuSuF^Z1j9{X152a%xf%8B`d6BliSScUd}&k%h2EfJQ&oa&|520L{vIF z``N7}r&9FM&EgB^chEA*6iv&9sE@l`p)t~nohX5Mhve?*6|dr z)q!7&)Oxivf8LiT)|LJY66}ePkRV>Eq;)C25$@-NRkdsg5u!SJ3CaKrjL8+jWtCcz68>LL*0lTun1r`1#!E> zj(8Pq^Ej9(Jj6CSwQIjtg-MuXk?(GSGwABMCs()eAIuqy6th z>GzR*@5dGMIyo4xrK%Umn9QPvUF-N-e!Y|^&KLzt90$`km!Ae%zMPG0t6&QH55UJr zi@O|gQz1X%W6~2V{cH0!BqQMNrOe*G!jgCrs}z{&<3|v_PPj7EIW3BY z?%>^9LByQln~PV?#K~||2FeA|Gm%6wA*`@B^K6=RaY<^O9R`Gp8e^1!Ml63#??_S# zk1K9G88Q5675A<*oe1>{yB*1^U^uup7gd+&9b@dcu`E7WZy?3l#K+cJTr_Qze!~#c zGqzO6qR(Y0S?5&P!7ZH$_K3Bg(*nNObOmdSCgF>Kw3-zca&fFpO|GT#6u`Cj4ID{kaB)^`^V!b&1JiEB^A_U8ih!)0t;A5%s;9=jQ*b{uv7p z``1tNbFP)h%VAZhu-z&sJh8XwN-T-`(6WhUNCd4Q_+&eu@}Jximu^Zdba{@;^3zOz z{v|)H&ZGwKZ@c=hWADN5!NO}M#|{-@U^R3_UH9jrLvcYra#fp za5-BX-6W(FQLk`aQ9QOGF*QCqLu3NZ*$#XfG*7m>DmJp#5X#? zfK2*HB%t~gfTrdUUyi_B6ZCk`Vfl%gWfB1(T3AfS`cJn}Ro~p#^vhbxDVI|ROf@ZW zgFVXIm>hksqc$=Rw!!KIa zHg9rRA!kRD+^N@)Z_&({?(_8lHI@N2qNK8e@U^X^%A?XWst!_vHj%>7QGKe1V5K5F zDh5$7D2W{EbO^1zSMTAPC&{bzs^)+RVRNoTBXh3ZdVEvX&4-H(^MO(9K$yVBTOKod zsGdn!$!||78NNi1y4q0~YUQ&MT9n#*u+pJrSizIgD_yS2)1%MfgK$ck&gW5)vJ394 z69)5GtLFnWSwDuJPjZ)ia0+ucZ6WBMcqRhVQ>Mk(`DNr!JOvPy;PW+=4DRP_jRp5! 
zuq?l31V~5R%R5?yyAJ%s(_ZWOI+f7oZotVu7(KA=LQa7o|144HY+&p6fMEmI3iX=y zb@})8a5;Yt?&i+&{z0sf?Z&m}fmZf!BL9G)+9sn9Bf!xg1gevl*4PGNvnE1=9Y6kG zD(;8J0k($9@Bk0f{&BoRy`t+_Pd=e|Ch0-16=o_E6Gw&!4+MG@bR;3=4bvJSBDZxd zXZtH=&4qag4#q{E8|woRY<~!%I27}_CMW6kC0NDL%R+GPD*dC?>dC&_?LshL7 zM3KxMesfGrMaid{tcWGC8`1qjfrYB?aDWOn$2{ncTvnCE}P_><-rteqNf(umgi5iQI+IfI4I(K*W5!YCR*;9OJB;YduuhHQ4ILgq z(=xv@d0H*2)peI>Hb3e70v#9#!jG5rET@XAU=94%7Q8RdUx9;%Pt32OVknc=BCU+rUq@0$*1h;&T80)Q8})(Qz1h`RZNtnOwtt>8_ZhTEmt5#k^7Lzp}i49^b}XU zB)USK$!iw^w9)d;2?mYny4Iqej7rG>Css~1KF+nwH(6JY_Bk4Obv$y_fVNaADDl#- zY4P@wcbDyOh_LmjhZR@XPPqH((I8{=A59y7(Z-XhKOZy%^oA13Fak$ghKWh0GDfHR zYbQenSsiztZ79t?Nj|5Bo=Zz*#d3TQJ$~Cpt)MP4)Oz1c5tPj3KAanc2Y5k@q$7*d zB4}#xI|GB;xp>DU!FMnu&7+2EmD^E}9G1igid;+2ZI#4G5BfKja@5HhMZ5s=%|w2! zlNuB>*xkbf?$-Hr5eBE#jfF@L;|=iZ-2W1JwFLD<(dsRxu$E#+4to8PT)c&^lvBIx zVnNit7t{KdZ5+!7Z^6BJr8WNt<32jBlHs6uL!an7m%nj9aEbHvR41v*?^yuvwkA!)_IT|e!au5Y6pgn&5Ar0MfX#}#G_%99nZ!i*Q!u$HAmnXt#6 zsW|m__^meP079?>+5z3S5RZUZd+EgfW3X=Ve)*4CYA9`+J+1iXvXJHL|F0G8QCcyQ z4-{jOi=G+rv5U)##ueL8wR(cB9^+ffz`(~7S`~`{`J{v+rZ#d-8bavf%8Dr1p%{nn z1nR|W&u$m{=H86-S&jIY7DzV%AC4)L@cL$xTweT&6amBO{^8Pt(6z@km8Ea?JGO72 z-?hz54dUXV-&+4mnUEpBZU({#kuY>9AQMMK(6?A9VP3&Sftm%Ds%3lKNNkET~ zdl?9=@sw!EeZjETKe!lHQe}JmkiG4};V@K}vtjujsE42m=SCVMi)XB|T z&LZdP?v52Y(;v*7>T_l-943A!>8!n0WPy`$$Ri7du4;wzm1+$GU!h=>n>-M8ArGz% zN~?W{*5!kWumWnkZ0#S8plPa?r=DXShKQC%od6()vhYP-^$lB22n(NK9_+$jvY+TU zCl=RzwhvEZA;#_)=QnIqQ)FQHCKg-yX|&M-&9+YNI0^dq@uiZzCiOA1=-zL_XhRbN zYD10Ua+$YN=`T*ZQZ)>1sQj%L!5s6=V|8VlD!IYbTrp+k2XK!dFKO?^8fhceyQn(| zn`Q$NKJ7mwjg%ZUT>zfFwlu$FFH9%b_(VNn z|K>GRK<4-jEg=6?#ALda&R3s%3SsN>J)Xf!Z`MaOobbIl;NAc;KwgjR}IA=RqCt#)diDKzTC0 zebmb(wqZPz-B@%X6GRtkqIRL4k$eCkldkplyPPOzm=?)cLBQJo32sWiypm#s@>e;x$$k z-?&^X&00ib^!Fz~wsol&lu3nvN6Pp3d%pl=vn_6WeS3${q2kcP!J9B^df(^r{DeSN z|MurjrjJS3uW14D&uX5zjXAYS1u&Yvq~3LHeCzytA0K;{x!_YvvS7i(ZK%<={PVJvQwn<>=~Y0eeLkEYH;H+(xoD^3Qe9YjdbLq52RJw&Azm&TNlgTF$Edk26wa}Y%rgcrzUh zl{}klWqOt8AfmCRL_<0s#C}`7KOddi*jq;_3}sUfYWz(}!VC@9{FoJzRtK^3n*vsg 
zz4f?kz2^B>p9-nc&}unwJUhXvI_K)*_oy|%Wt0TB_p?UZ*5Ai{$vfb;pdF@>{wJ&g z(fsaWW3bRic`KW)tg&=hdBYf@X$pN`QdDIb&bK!>svBR-iGn2`cNN%}06%&o&`tO(|u;G+% z~Zea;ezIjBnnISWS}Xy5*TLQmIPZ|Du_aUc_!o(s=+C!b(a&hiD*;y}@f6MW(5)|kY( z$~Ky*DQERA53eZvQK$>;E;W4K`|HF?P7}deg_pPWvNu+7X_d3v*d&swZAWd)g>q3l z_!ycRtl8imzq7O>OU2Z1pb=WW`bZ?_m{@RY3?&^@$CqkEi*=<_>JsEWh6(fXS)rDo%&L$3d`o}1x}zhuRy&#BghvBVer|<-734cCCkI}Pf zkJ-MVp|OEg{bMX^XFq2Q5WRdwjiWg3;5mI;gx{@jsDY{U@N(B_^{GwIm3FMm*2h+E zSW!sVxPnI<7q658bTHmtER!Pw^R5zWM~RTKnbdHo$umZeP>^#Itm)xQS68aekVtTyrT*_peGo*CtHTD%uHlE> zQtB1(ZfYW;ak%prXMzgfzIq_7k*VBE(kj?eM(=TebTA38iG=*&-Y&;mBtmf92?#S8 z<JOh;zjXhG*Ww_IEap7-0E1iu<=Hf zxHiT5fhwpQ9_&MM$Yp(+M$)asQ4-BZC%cWvn4HArKvK1-8KVm_(?y4bcJn?7pk{&y zd?UZ|PKi9NbieM{xD+Cps^SJE=%yEUck+DHk%$ub-I3@5Zf}v8Cna@i{NNw5`Nz!rC?-SWd*-^xk!uaI*)7Srp==k?Xi;mR^exveCn(w-=;ZWIm6x0XrA$nG| z^KNn0Dxy~WQEuv!AEUKV_BlbKBycy*xJ=V1FWGVOihx+gunjclPtEViC|~n9B*{6s z+=_r?pQP%GkDBSlvsy4oQ6~>ztP+}N(B{6gbL=1Q9Q`qpUK=L<4$W)-Z@V8iC>eQ| zQm(D_^Hph4j}z1NN4;##9X`(C_aSf(daL9id?RN(Ci{K&dw!OLm9*Wzc%AkBLB~K^ zCApoP!BQFRT!oG2Xa#gk&v_0CA12QG@eZGXeLkN>oHA)?ZiCGh)%~0YRUwOWZuw)S zrtPv$SHBQ3-l zh4%ZCJRF;*2FJA+mr?i*)MO-hOzXp&{allAp&vkbZ*X#`X6PVv)UdKb$1`D|Uc6GfmjDD!IU_l+dR;YCf-h+do9{HeFkvI^*521gu3$gYH|reay_}>|zhN zlZz6o6sV>3TA~Z4+y(%>hxjHFegH$*!zcG?>K+|l{h~@7_4GNnzNYBIvnIt{D+ERn6{lEm!85Y9UVs zT_^jO2uDj(=TC-Cqi7XwSmA7>I+Y${?TaH-Cl+lAf0g|g4?;v--1(zZ11d|3 zj-15l{>{dxh)rT>Fzi>ns*X9qG!k4rc^JyIx)F}@yq}Bi{{b2`O&m^>3C|C6sRgTanT)%6NU(`T&n8kez)Qhk_I=o889wN@$BuZhhQbG zpuiPQu%37u7(z$fmq*^H#ilBk6YFue75og(!9e6Tf=1}$?`SPUI*b4OGnFW$^+{eO zY<+ApIll2~wWy#`ooY>CIOWoszitk`Li^iF?QhTvJtRyHuJuw705!ZzZ;yQ&S#n$H#UM|?x9@Ue?MrL_%3|P^B=F!MJ`wI4ne+y( z0>4AexGzSvR@B8BiEcOsns>;bkk%$pmTWa^`9o+_%nk#UOrbw_vIn$m-Z6ozmWX z==E(PddG^$6%#w7_d564h`tOYpxCbxKKd@l+lx<~gq=F|_mB2Kz#O zIQq__YEPR5D&iL6RWrGp+tdxxwM3d~_jF3}wj@>dGL5`xgP2^pIo(uTq1KJ{=qe}E z%PVb@V%OoJcM@p;mkb!Mlp;I#XZz^L=zHKJ^r?%_HBJ9)_H+cWY;4M+_T08;kZKg_ z;;<5-53G8zHW_a+SvJc2ns2_$8uYBU$%AXOdojG7{qC6?nQ64wzUu{>!t;bpErxnv 
zSqL?1iBK0LvUa9t%Pah@l&1RA)ZnR&LG#WYL<)$Bvc?M|PE_Q1@7e3_$2nx=pA0yT zeGl$#3faJWTg^f2)gH`J=S}})lSE#?TJ{c5j=IPG)x!~t5-(P#J$X}4ONsll@HTSD z&Y1sCjqRwwGu}QFkh>F12ixwdUEix1N_B@E)Z!**lpNTO4haepjZ zW8ui<1jpP4EzuuN9c@Q?<$gG&~V_>V5t*TIu_}N zKYS-zwdSQ)+vvXPBqz_1dX46LuU-N+DhvBxQ{dPCLu4u4qE|LsZG4c&W;B zW0MQPI4G9YN&g51p|ofuQ-2Zlp6E;5*YpU|FkbC`!idU@qqo^d;}PGnB^7GKQF0S$ zsUrb&_WS6glTYb7XZ`lg;qByxLp9O^jmD|+$$*rRi@L}QzOBz?N=5PzliU+0D^ zPaF25%SCz<(o*##k*E|6qbjZ;>GcuWNiWoH86{m4klG=gpIO+ubrev_7T2^R6%M9PP%S7jh{9ZwDe zDS=++&pRag`OwAQoS&tT`PgK6tK&x)UIyvd`C61=qbh;=u{HU&uMbNbK|C7&%|tWf zv%4usEngNYA1@_|(J*K%h@j%Pds)U8Dq?w1>>ey?XbJoD?>S>@rnwrDM^W8|Mkg|z z8d5M-e0)3B`1S2)CLd%_PTGzB(FuALg17rpp^qcL)~581?iD*SE?aM_6M}6t(u?j| zn6&t(p$4<%J9h(vp5aKp>4eVGi&s828W7_;1EoDd#Vy)<1NUj3q@+CW)&wV{bo)AS zA4N7PDp-Xb;Ok*a!d92dPTzu(c%GXtX)aRZeI_s@RgwarWLZmbCc+;7R=J z=dbV!l>h*7*W{H^BlRbxSQWCAgi z99$d-UX^IBV36lKcl7mDMfhH3tQL)P6)?3QiUYc|d*^fF z;am9qeK`Sxvf2r;C?pmag65+7sn1DVValLvyp!%AZ<2WA_EA*}=^3A!(-^>H40Ga8 ztwEg(U?Uyj2=?kUEtNz>I)iq&>8)mvNZ^^+sxzb7uv!h{nH|&KRn$i(Z~2Oa0%21b zAVrd`ev;caRNLFkBNzI1M+$sG=KI=IJFn`BXlpgTO=`uGPcL=*K-IPge{9Fm{`tv5 zl7mBB@oW6`(q1h_2^|SR8PB+we%nbzXoQKbMKzBxK}Fp$)B`a6(= zDADou(`h^?>hiEZ3o?~yaJifF?4KI9Ezhg@k)&l-T`KK?1iN+8?-3@@uM#vj)_AIE zyUovvWkO5DCUCPEn@P3?$-_ks zSi;QhA{%AdVnU3BwH1E_lQo|n@<)?u{otIIHr<5Jk2d=B92Zyof~`+rF55*FZLLhM z(9h}IhiwL835e=6;U}Xv4AWJx;^VcHD?Upz4Fg`Cn|nDj4#R|keT6=5;9Krd{huP& zwODNw@t>EQ58De%js`P6pMBn&pUruaTX!M9?sJ_=Qi7!6D=W5vq9DB>NzsaZAbFAC z#ER4W790ul1Iv%>dpO2{Dx}Jla0lrnb-~ChZ3bMGDVnlE>-;rd z1|e<&QEzK+J_Z*3{(ix;O~&hR`Y90Yp62L%huU66#yLVR`Sd7Pgf6~J42c%!gtE=a zK;u(3)H9IfJU#+sgx+2zBMq@r3y&syHH!1$Av@RQ$xe>GBGal~7e1ob*x$>Qy%x&z z#SW3`)>`3PoSmP^Ni7OL@MjF!dRQ3CGp}s8o5~p3*><1>DhhY)d^B~Dy30kWZx{IK zF!shn#vjDoO@wNfwimJ;&gMD{LJ-g9`@IO*fDo;7QT=yr8BP!$qgJgNkkvW{pE9p<@sHecwRc3ZZO#Rul;>=4#Zn^6WtM&a$W7E!t@wv-rIR z0N^QCqew5RXBxswqc#?wJKfwh&4<({$IzXLb}rV2?BqYS!)Xq~_t&F3e$-R;IfQkp zp70ZgZTEAfqZ4t}D`P|FjaBHa%}&?u9MOMr)&U7JD)zsfKwif#AQQa2r{%rYDr`A) 
z!85e2!Rs%zukpK8JU(8$x#w*$`;tD}7&KJJnn1Ol*npLV2jaVAAEj>a3O^4}pl{_m z7IqYOMsUB0CB)ow5yEp7?v2k1NYXJpKZnMTJ*9Z~PvIn!IVpTX9@<^nJhCg0YVmGuyMr*z=u!%&lwV$hDFPe^*A3qB9NFp zPwkalTFK%t)8-SH<^)#cot=bg`igL%D`ek1nIA*(XqT@G>T%Vt>u|S<;hZ4`kSku< zQdA8utbARdB4aG5z^FVNky0!$VsPrQMJyKmUh$njuQZQbI-B6gwb!(s>TtCda%vn; zJ=Y9z=;u32?DO{DXk|z2-4qybw35f+DuXIF)(zd8o+=*URn5{g1?QwUgvo1l7{kxO z@NwfxwQ~B2RyrbnkBy>PS!aA64hpxROemtn@*qh1*Rw1 zZHo-~(9mYabKkVviak4ie`kLo|Nl{R-r;P%ZyOIqQPixh_NMlxY6n3g_FlDD?A5AO zdxY4d#EMyaE4BCDv|2NYs-oz(e(&@ClOum7IiCBL?*zJBT?kejzcv;rUOBtH7}W@Aq0~%a9OJq~HhY&<~c86D|$q zQ<40_$7q?}Z1QoWpe|Z+!mrSdbkR|q7|3*W|3=^O45C)SztkfYE04#XL%2zCBWsRj zO@??x@%h@Xxo4D_j&GhyI-X1Q4a^KRQKog({}r{ndPM8br!G1AB-rs^Vls!U>&efw z9hTUTnk)w{Y=}&a6IrY=R*v+#>n%mA<+t34vI=9waQR39(eP+T?3>ud0u%7%>+iYQ z#|ngUu6@Yv9}{b|0e*8SYIlQcRrx+`x6*X=#cH=7GY6y*%`T2S%Z#9mtUQFHjJ7rw z-^X|cY*QvHnAKkqJ--Zdv*`I&*v2RG&x*`b{7nHBqt|^WpMXQ@Qxbvs%4JM-L&NDN zUGXZyb<41$X?-aW`ADt3dFj|Nc4Zv=lfSjg|M$yiek6Q%e@?2Qxj7hVa!lFB>||_SB zjVvASS1TK7y222ChNXNxlHQqXD{kRA&4rCGE>el$NQ?dR4TBJB%-D(hLGEn1NqK&A zR>|`D1hgCHXGGkG%BFGE>`lCe3p|<5=AXjQ1|CMRa~DN)(hc7Rb?9W$b=ksUt;@_xj;uJ&Fh{KARwWL)h0(cK)F}hF z(y!YX(7>vQ#LY*exQOeyu4ubaq{^m(bID@UHceIeI|2IGX`~&f{5pJ|PXO|O; ze@&OZAor$*@3m!DI=DkpOv*QjvZ*NUFK&I^BTmz0WwD1J zIQ^xx!)3{%wSff!;9}#qUdUDc^`J3x;BLsWE7<?xuq+jXSi5Hs6c2k&03DcTf-pO!FlH^ddXOyOX*>82b)X|FOPC#}ydA!e7 zf4%3x&}piv+bMejs6ad2M}cXQNg*@J`}LO06n!aC{}AOloG?$mX#$DQ7H>;LXp8HZ zH35=y4W*b4fL*alCurqfz}TzZ@yasrNl@?JX?4Fri<`?LdC%YD6H9%(Bg7NMXZ8>c z2R#CSJA|*8Bow6e^(8G29|Y4>3M%yEUB}#R5~?YxW^c=M%=bGp)^@M$t0#r-o&5Dv zm+rAM;S{?_9VqX^advqaS-n)F8)9+GrsG z)N5DwUUvKLAUO_5KRr#Rl`K!H-iYS*EZjU;*{}C-uq|I^>%%F;n96i8Nk1z9B0$#` zHEhSh+t+3&jTFrfFKhl`nN>H9mRLEb#I)>AdD7LVlKgi&v3Y>u86dVEuKzV{mGVs8 z%J+VxAusp0-g92Aq26%$a z$DYS6HU~Vn|C&>{OWRw^0b=<81HIu`JIrP(M=Da`hg+68cetS!d7 zu2v&Y!bHk2*wc)-LO|G+StIgMgI|%9%6Qy3aZ>8F$$dkrseA84wa399xkY0ktGz;-2``Y3NQE!Uwy0dD9$A9u@$K< zX1-{rDav&*z)hNNw+N9Umr>a*7;h}1t?yqnf%L-!4F~fldTy_%OSaK4SzGOI_*O0~ 
z98^SQ!OWBq{GNMVWo2vYn<8B+u$(2JGf3RKTuR-K&l2@d*+2cLX72UlJaT42@^#GK z)z+;9N@8U}=*mgWs`G1|zINgAZh1F)8ln+$>ujA3siM6P<~^>-!hpkAA{5Z`CgH{5 zMwNT#+8Q~7C{?{UZ*iUN6va^gwd{UkC^S41pXWXMP32`uSAs;^ixErfDVDOT3xxaC zY{iR;*Q~)gt~+8K;94%Ec~FZnlT{>>(75yh84MB@Hm`SMz-*9|hvo_A5`;5p|gWkXEumTh&fN&?|G8MY+F$jnPJS zkyQC?k%Gz`GsUcqF*|nxy~~1@RyhUu9UCHU+{2smBh>_n=il^kOy=N}u@zZ@uLBa@ zBl!Ab!`ahJ)b?i2Bsh0}`*pUOLP88Tp{lZI6o`O?E|(~t4#e#9S?j>e0tcAgdlan<%t3@UgLT&D z6Mz5b#@L!@)2^BK&2OW=^6&1=_55#%0FE=iBVK(cm#LfPv>NC4dmr0^r~at@q3Z5B zxZQuDQutT<^HW0H!Be=WW)e#dp!KctKG!VJEY_RhYsT#?wa`U-l=zmoezRx8DE=Y{WSlfrG8zfz@O8 zNQ#=Xq#s{xrQ4-iirD9@0!&*=)x}@bfp~@)eCG|>y`LUYnNhNJKGBX~`p?Iim<-PR z37Y%@OUDY~<5~E&-}7CIpL48U$+H-h6C6+dg%8(}%$BT(Ea1)iS z)=sM7F~Jseo|jeLP+jFoRhL7Vd|4MYrgFkqAwmJpe-`X|tq7jzx6Zp6&1le z2Xpel;(ONJ)>@51S=|2;Q%dkR_gxQN_99d~eKVK)@yfc{DuUXc?dUVW(w4feCGgju zJ9^G`O6EtL$uv~@OTctaZWW&h4{-}OmEse6wi`dCwFzn{`dj=r5!SUAoJ_eqX>~e6 z|Ejv;moIh;fw&Q`4}giLxikC5xXgR$mu7FDdr!B`I6wQwDA;ZX@d_&ud;3|@Tw>2j zR!|*hU@1i>P`>r1=!sGswQa3-gRNF=-6Z_2eA!QHI-eP&%px-9!7EUGh4Cexf9J-b>a zYn`g|t>&QG0;I)WRsVEP`PDv_(kiOKOipwQPW*!r55490?E2>Y^7b`0O=X`>(s>vW zJZ=4?B7N)oLx$GWuCmgZnKpI)B{`7&%OhhWMZI1A%mD7gl>&QkFAZT=kGkd7#0CvF z19DYZNI=3<59#)eu0!xsbf_^vO>^`w%#T~&*Dl=bTYz~^PDQJa5@VTW^n?$W4=Ycn zh7KsP4ff+^HOrqbHZzV^%J=YvlOESXFlJ)z^RT7mz)2I1H0WNHM@&A@ECdn30=qTx zZvTnvjf{r0XgHW0)BD$DsVi>1DL11gWyO7L3&fWG|5lATG`LMQD9~RuDUnYF33wIY zD|>?iWoRf*rq)W5EO(Z(tF3lf(r~(FSzO&Lom<(*jb4G#a1rUZ?0w@qzB_{W7P@hL zvHX|-aa6!U2KC~pZf8?|Lx&>m{Y5zq|VMhdgAt zSo(uRCwtC}#qzj&DEAp*Y$jJOO)11)(HhOv3 z;=l^e#utdeD>+X!p32JYl^gVPY9CB#KFq{S;Y^^9=G5~00nrQlr$PX{(&(izS#^*0 zzf{xSA_3M2YOeoZMa@*l9lNpZSk_Y)UVV%}B_bdCN={OeTkqB{0d1K)w$KKfu{Ys? 
z0g7kE?&U*;X;6!h*0nMoAS>$zWM#BLel3Fs*UL#KnPR_Ow%`?i(|h?X0ulfOhMQ(P zD@lemPvYq2)Wd$@t!`t5@4se|U%7{c4?`CrFHY1yA(p+r1FSTLhod~*>+1|7|3&bZ z4TWlULGMR!CxRTo1*53IF2)pgZ>GW%Xt>y(M0cz|8wI`9B_ryT0xcfGwwgGUQFsmC z8|q{<%}zFMQi^Oda2zt84K1!^CaTJ0s67$XhKK%FBzCo(=${|^)gXWaSoSk>C0o18 zg<$+jwLXis3`wYzTa6~DvlFX!Mn3i{fd1&}d2Qs!cq1rK_*f99{goJR9*A<3`%xMq zgKvlW7DrX+W4dZ&`eS+_@-Y@%?wVFKhv|Nv(72Cm(2qva4<5-MycxwaOKbDhj0e&+ z-m!5bo20i^bHVxlFdr`hWxjbc%F$Ane>s_z&x@nwf` z@zlRqaL8Mg?1Ot7zXtybGp~{jru*C)fA}Z&Cn+pO<1RHYnEE7dG<5<-Tip6^)e5I9 zRjPsn95L2)<*Zx3e2AfLd2Cuq$$=X{ha*k3(EC^Yk2X(%Km zA*uHbsYvy&KLq5vw({H|RJC;OLT2R;Z*A_z`fE2G%~u_|Nl!$GYW!Zp^mLuyI15@HJkcbp2B&nh1Jbrm5p@U(eF(N@7B8JVuT>zNfXz+CC}RC;oALLkIi{# zOpU&q>Z*M!0CR?ZRauG(>-Oi1lQOmriZ{p-khP!;FT5x_yc@*2 zt>w|si}R`raz)Ac&Hjrwgr*=rZhO_4tGYBaB_&$nl&|DVxOd60$JYwJXo3dba@l(S zQ6qM*!Qr!_JV2ySpU&2_hssLde@uN+BvMdFZEpQQ8+uYe7u-0JKy=qt zK_B(c`NdS|phkJahc9HCBq`@LeRNYXS+!bIr%m$wuC z=Q~L#d_FI5el@RZ6v;sh;^Za2$8mx8W2HuFCDZ6RDjaby$6+Z}-1PJT0@fRxbXlb+ zV6Y%nUjIDPrEZ5D_0b(9L`(T34bO;_&?S-%M>%yz%tDVd2)nl6BjI26eKB!dBMa`a z=oF2sQ2l0^#WHx)d>l^`^*D#x4wIOu&A%q67hxDy7FAQkE5rT0nE2B5<$O}wvrgf1 z`a9}R_Ee=hHyj3!CdwP3+-H7O6Emg}`3O;v3(Dc7N@`| zv#S7naC~nA5;Lw?NeCNXj5-A9y>&EX*7bY_kw(n%WkPQt+Xg=KN@sau|{EEk!>dj9&$@Phd^E(R4ni6^z^unwtEN!Aue zBc{5zTHpfrZG)aiep6QQF<3&^N_2>R1<`CK6!I}=Iq!I>GOL5PHb>6a?aa|N z`Kl!=TQ6^EWNBhfTi)3*;3YWwi)W~xH{k@j&@1Wjac#^EFyZVbk1Io6Oj~ z`Sn%x*Q|g$@3@^LDA;_sJlTP?W*BZJ$K^neP&d+=LQ-jSP;IRY06zL~~Njq9NAa}~eIHCDbk_eD&n z43~3Dv6qEV?RxF&kb4kE59)J^WJ)C9H{ee(K)wkSXzKIi*}4oimBJHa&S?D!OWPOu zGrN1ZwST!|C=foQ^mFZZzgFwrD=nQJri;u5H>DkgAQt}vJ>=eYT!y*X`zhpyb{E`C z3SRT^z-V84jAUu|+BVN>cmEltXtNT?Y<5%y(`lb=f3&S#%e<1Z6tG!&=D$)(oO-ri zy_A@`+<5L<(z@hk$shp-M!g7;rTjXJloVG?h>bRls;9g);5Dd%wWMxTq>d_aIiWE( z@(lMzc}Ky+i%VR8%gs(~n7ZZahZM{5oGg#~};-USNvyUAPF)$*2PJpYBov zMU4SN47xL4UaVJdDj_q7_7`wyoZ4eb9D}RsYf-IB4^KIN3@PG7GCHk%h&$OT!76fX zA1xRfly+}qL-sCwPtLsPxhS1f$wid;6c8Z~Y_-W82O~TEkZXwwp0qpX0`n}pn1YU|h z?x`f{vCDNk@wLVxfoj}N)~;r4$ikWOt<0ViG1mjDY^P(waxG~`1IUPA-rq{YAn{b5 
zj?QQAQCa%<+&J$CDTjoBxs(XyMBC%*t*)rSa^~`U8bMz3;>w^=5htENe|HRKP+L(? zW)f;+qAHqqHQA%*y@gl4*(H7mXye@(efwL>Hv^lxVrhc>5Fl9`F`SV=KH_hY&)J_e zeD~9dN>uR_*7EaCCjU9*K4Wc*4l`B52w$411lpUoA001@hJ@u30E3b);m%bocql}KGF)W z$w;DtmN$#p`R@~IkkCL&I_txI`*|I@JTq1hPH7U3|D)Y!G{Y^~q}zBP5dNYG>MuDk z#F9S8N1M`fn7(tD5$04|aI#Evamj+Q5`YXi=0EgD&!zF8bruQQL7f0Tot&e%6A}eFZF6kjA%RZu*RIAWh{6j&Q?DtZ!Ylhd(k73E z&v%((7z__@8%^|FQtQ&`+^h0bw>CRkmyK-F!b}LeD}owr1Z2TH?&!>6t8!#;>7i56 zSU`<0E^-I+QJ9Dh;#U0VTH*?3y2@Dm$Eq^uGL!SH4xgx%Th*2_NbF77#?xhZLu}B* zl+9|!r`-DJVv_)A3xclX>o?iJ#U#Vj7E!!uA3G%7B2|nu1(g?ti zzaW^q0^iwu)Wo^xH2D2?#7J(TiiI_`-m4qV(JngOc^cf3Sy)(+eJ7_iW(35775KGS zgoPCsXq&p<$%gZ18RzKk6IbDruxyIUt)ZE%7Tr&xTUJmje5>;k1#FDR_fDz(ZY2w1p;S}Ffc z|CZ+H8Mg4P%o}tJ*c;{h`Kk{Wdit__Q?cfG#?z=Rk;S>d;-~A2N5scH$Yxf{ba{Wg zI$66)J_Veanq|2F4(h&rCw^g{?VWRetah`j%|ptLR?jQbr%Lc0QthnZVEIys`2!l^ z_mWcIgz9G8jFA{#J*;hcg~bWXm%1<{b6~YHnP$0v*t1XO%q)oavs^9^fAv6|qVq{d zr&GPz41(Oi(zy?b&5loLp>{S_^q#ALuVhO@QPd`W^C|K^DH`rV4YwL{@zRohEpArOx$5@6!m+hu z)uzTcBvV~w&V%kz{Gr-h%{+@%i3jvuHuxGpPrw-iSozT*Bzw#`w=lsa-bY;`TdxzZ z3hJ+q6}_e~+TTO+2$YQXbZ#Jsov z&T=ynILMx*to`k^ReBhELX#L@2K!^}2m9ue(B+aPhnYoz6Sm7xmb~?%;58xY_Q~m0 z;eWb5m->a-!cO)6v!+|_g@{7T9M<$Sg+}A{qcrVwqIhWD6op22C?d1>z*mFlh#&)L zDU!b+Zud8OKXD;G@OqISnRiKtK!<#TQ38o9?7$rJ75HCNybkfykMX zIG%wnNqJq3YXT8l9e3h#{$lNAI}0_bn{wuGlLh=YKQqk)DoYILvr7bE@&bB_d5c&u zELI)G_1}|WyA~_So$d3B)hZQHWOq+W5=>#Db`YI=ecdgseje2){qU6Q8;WqTm_83W zG{Eeh`L%*ePBh0q{6DLeIU7y$Cz`Z8qeRA%fPEqx7_HtNOq>}lYrBr}PXH}`k{~cU zeRX^+Fm>_8JaBWSSWi)biDtRaOscg+FcBlUlsQ{hQou34;ZLzvxY^rYU+-tLP zhL4q5U8G`YOj1#5m6fMVg}QW%CF@d)on%!B_b&pk%*{OVL@QEZocU~{C9Vu`=JOeK zt@t-?lJxx&)qbOp$(aIUk@t1S`zy~f2v@n`i+q5JT~;Z+1d~NV==$)@qDOepmP>e? 
z$->ytJsbH6kX*N-)=Oz;rG|4<4q`c_m4Df&$*-M< zJHpU?cS;8fc)}Cj8MXItKU0u|Z$?&u8Z5+g;yRPUyBeiZ(D)ha#&I821t?+<% zs`&kjMVa=B0Dqllil$m$#2`!zp{YJolXCVOvtwWrg(3+d2PDGPa<;fhlTUKVQxjW(1JlvrF%(U`BUs9llrM)4m z4UUYqK+pfCwqjWL&AtMloVyhNkrKIH{Yv>gCC|b6po@jaYGKs#Wh$K@PjTjbw@-F4nOD@o~vOh7E^&`!B?L*6&@*mHDvEbpYWS; zD>NsjKBcs|{hNc$IFLi0hJ~*4VJl1Ai{&oY466i}Lz@4U9zq_Fe;P8pPiZs%ac5v} z9k;Jm!$&pkj)E+&t3z+M^Z zu*wtt@YFm3HJ`*pqIY|4jl|9Romcm{8p)VX@a`h>KItNG=l$MT*S^`vg`2q5Oy#1A zr>ylh#beIC*=`2^;(Mh2)`yYBMHR>$GO0tg{%b~r&c}*32X@dFXUVs-uYsG%oW<`v z#=Yf_RP@XD(bqOIdw zU7Hovj+er%B0e|yf4J-~iI!CXe-Z2It$x_RjkbIj<0NeEB$b0dQ@wwL()sq(aFF{z z07yoZthfxNXmtn-X}$-p241@MJ1T-&<>Y(o)ksMMUP)ZmsnYHF?O=8WeF>cZ2JAdqlc-Ic@O zV3m#8S4Pv)b^E9GrA?slblMXHwuMjY7!wcvONWghjrEQsCkKh*>@&C?wD&&m%H?tP z)4CZj2>Vd6Lo%gLJ6h_g=KME3*zv4iG2dA;Mdiv)Y5Lndn1A(c4VTuR{j9R@D@#hw z77*CVa@6-*rEIbb{q>9gUNZ)Seezcmm>XE*Orc@EH}@@+e!2St%;lIW#JOL#sHt}+ zE?F@NW(f#4>6jKsXo|S~e__Tjy123-#tt)&UnwX{aVPQLgo#7}a0%)BbF^dSnG- z9>%BsDM~Ulv^!V@=5`4P#X9s-S(1n>yvH15kO*eG_}A+uO0c=I$6A zxzsnsVt7h~ z{s($wRDAmC>+TFZOVs&>v7Tqxk-_Z*{tt-{)6Z?ywXB+%fho)Edq4+1bE~QTcDR&n z5~wWd6x6v1DAvW?Z>$jeE-Tib=3M4IkEzx3?A!^`gdnClT^~t!;ztWHQb33Y1k>2; z*4a&)4auKy5G?M`29)B5fLVnK<)#e7e!3@i#1Go%Zvz1jN5=JXi#t3wyjN_BzdpeP zII-eF)WsXzGlw0o(7J64Z_MS(5LmQJ#c2SVlu=|EtWpU07&Z!(xD;EZ9(ENHB=%2< z0#%>nD>rU4uX^En00ESi`OY2yVWDJA@-)yezc7e{rt>SQY3@RDMHcs$tulql%)n&& zLeCvNWSmh-=INZ=%lNi-Ko#w>Sa*3?rfV4>OVpEF1NjZp$acq536CtGE+)hjuszJXI`&IB%drAW|;GK2i$bT1sskES9n&I9WNSWTZW=b zAq_U$7{MLliCwb<;l_wA6|Wb3d2KSGM)QShCrN6?Ie)m2`Qbnq6rbr)e$W1@``_d} zeEuej?sr@Gz}JT81oE$%AlO>JfI73Zc}ad~TmjDtz3Cq=l*5pm&zv%IcfwARdw3DO zzY7mdE%+tO<+^G6my0>g?%KIx0{3OA)%~{NZkFUgGXKU%{jlf`b6Rw9uah8`k1S@& ziKlBzMo1fBtUdK#f?*8z0ZfN^y4Kx+_MgnQKs}3aL-{90KFzD$?(UkFkW8KC!8zF3 znJ!0{n@f3NRnX=(jXu0e3RA~14YkZSDBe-|^c1g_rSZG@?2@dTn?8lw6+oXoc511B zOnlLFlBpO>k(l{D(Ci2#LEtT!e3}^iPL26o=DvKnu)&{!)-%l5U&ZieA~L50wF`>m z%rkb{tC;EMXtvS1eNs!t8lGUMQUka07ai+g3?OFxxU%P8xefmtsxXuH5NUKWc~UA6 z9y;x!suo1`?Xf6vA&~;Llkk(b*%>5NoGX#<5!jiAsK8S~(k0z+G;e>b6^f`uR(q(R 
zMJyzct)4?D-7mbCgWAaCnzwOLTOcjSwGpcJ$#il6*msF2gLvL0YX1Nc>QZ%6;fCP# zkhqXawEqW(9=Cb0Skbn3=X5J32<$fr z1Kq7)nKv@*{h2K`@`mf5pcBuvN=;pCA89?Ko7W;>wR{A^u@wIQ&UPSAgyB>PY+d^- zo=Jy$M;mt!%SZM6l~!V_C|eod-$*v$L1qSz@pDVl?~4spZe0c<;~=Ol`DgKlGYvIY z6y_=XO!ZujcsU!l4(A&#GIdrc?n9!`mEOWZu@-DUMbd%MlKB2V*|uklvfx!?*E*WW0UFR-k7Pc)iW?i z>H+fMfp}b}74%!H#`$w&%^YnnN*ExNFv|KBMn%)7{VVmEUg&J)2n^~bwtFq{<+)zT zis6r(TiPT;*uu^WokwHI`K^sZO8G)EI{`{|uU|SGOCY{hR{o3S2()WR{l}$#<`6nk{8F{X>(vapobkL*u)ZoEYHvAqq9QH2X&&ytH^a#8hf5k6QpPN;)~v{fFM_2lTeY30vv8KD*G zDm9AuYV%()EmW7tNXPV~d!cDF)d_PoLWFcvB)6z_n&cZOgL`ZxCJGlni)Y=lCZ0?t zCVKAVbMm6qK57)W+U__Bk9j7y&Tl>GuRPq#F*%ztfzbEViA!r6m}xpgg$J30N-eRC zl_^dMGD^P`W;EWL>5CLp_Z8(1E*>~YS65cF%}U=XxTRK{NeleVllCH3JVBxH0kJ-p zK1F2{HNdodoV#6-C!hFb*MDWrsDxB=_b}^#Cq(K&{hbqvXrFlVK6PBcld5%7p8jYt zj348_dtS`pOW9FjjZ)AylPk5(`bCc(O4)8aw+I$>M<{l&yo_J@YUB1MV{sK;75DSG zAF{k-X5#uaT2h9^M%nmP&;kt+@{Zp(%d^WbLt=YVd%xzZ#{WQ#mx)WuK8^f=-JYn* zC8HE|S&gY}Y%JkYLH?oIkO{5*@~NLj{YhtkFJpC!l=O^Ctxg%gq~kqVk<7J;90#Gr zVzRr(9?$ss20A8`-N`XN`QJv0d0sQKTl;6R7_ajgH7l8G=_V2Nk9GOIiO|+>Y;RgL zI`0IpU!>+7Q?)Slaa1;DhqBnB1CptDr!dv@qivJeYn2wuB4NHOc6f<~GsBvsCbc@B z`f5rF|6tuDH|c5i!6bVh*X!nZq$Yc#QSDD4%(w>SWXw0$Xzy-%Zb_QpBi0;VOx>ry z|K};$o38MJZ^iM#pa943o5+Z~v#J2UVMRAF1p4v6>Z2{oHcorl*4pjwYi|FcnhWl~ z-bytZK`RgbW0BZDlIBzR$aKWlGqL?Hns`=i^U`Ah8JXQp!TE1_Gdy~DVgo<)_T zobT)*5f`9Zo#KBBk;+k1^XmwDvuBA+R1XG6Z+|^Yy`J*_gAHmyye1Rz*jS8#w{8y1+F5%2;xl@G6N3_)kTYLvOb;~BpjXLyq(!RK#1q+#^`<5OZ6SU&@vi6B|g3tG8_BSVy% z_SN?as?gBkwH1Oq=<>kOFaNVJo!jT%v44Rm1`h%OL&){p0Y%7*od;YV^N09GKkc_U z&!cmo=8_Chz40%SSl)`B|M1;7oo^E%TR#77zU`whM+hQomfkkJ91ltV_v4Q6G7_{X zCxNn(?Xlq9tZ)8rC=nIiY@=RzZqK{Nu3ltjcrgtRcigcmne<=_V96fT;h)>W=YLJEWzVM?@6 zMgAn@y@f)8P>b*Boa*QHsp(|YeRg?hPFlc!QSMzODE%9@5Q(o?_FjluAb{B2z6$3y zLCf4Y3pYpq_j=RU>-coRr9$rDRnK3fy)UooG7_L5~?kzCHiVYh~=ACiO}GgzsyZw0ZudelY9cu%ttNj zJ!KHL1PO^GgSlM%W;s>FeX&N>VAb=Q7g_x1)x~+DLJ4>t0 zTye4WHe(lyG+oqN%I%0q4GVt}c7;1$xdeNgH=d@R_&Y$%MZsaT^lOjiyJy9!Z#J9) z%6J z)oM-=e6gViJN6rc1Wk5#aMMWA4x3GcOl-86Y77yir(DjIA=$a=(p^f_NVGio)U8b< 
zw!+wLn?0#J%H8wYiW}7Dl4!_?vdw&V&wp2|0(KtIlVV-x$|{A;(Opts9f*ve<0Ri* zlYSSLg5RK^$IMDEDDBr`&_g6aE{KIoe>!JMaf*dTe;U;T6iuXrU|eVmnkRZ?Q6Bpt z#KejM>%tYG*dxw-yC$3=BDM^_te%X>n5iP`Z@BNF8rG9C^gL(|#Rm-%MdrTe@yNTY z@6lkjaN|FVC9a)K6>QMV;gg?^-@YIT!6z+Rj~Cs0iD) zq+-v~EBIZ3xLvfPp4VT8Y+}FawIvoU9k;_v2k6$+91J^lEmk@z&v8CcceB{x2X8o^ zjZkZp4`dHQh^eaJnG}{(m#dX5C*hPPkd%G^;RM(+Yv6KaapR?0zr)v8rqaC%moqL% zItiJVyF#=%vYTM|ctX+{?*<-8-F?f2~eHOv;8%>U@=nqsQ$u-H;udGOy!dAVM*E6={7Wiou{f|ee*1d zSxf9cN#SXm*s606u{0(Sab!Nra|@`@wMBARzqX>4LWe=p4VNigV!8EKo6ONC!5 z5&lPK4r?+B){Lz+R`wIbhZiB{z39JXp-1n&Ep5~SQrbW3HFy65QO3=9>W3HhJ=Ga4 zb^BG~A5y)<#&~J#`S|J`_SSMS8Pib=-7utaojMIMr?)VA@*hjRxB6d+>D6N4W_if~ z+OCe+$<3lqU5^t+H|xK4(R^4kGC~`Ami1ioD|&Z!?YaqFNygB^#@!|LuPx3!3CkO( zuGYQz*gqux4|H!h)nPZ*>C=FxFpV(5pRjzFl#o^GVe!~d4Q!FFP^5<%{T(&tH6|B3 z=Q(g#TW+#L66P=N;8#_25Jp4CELOgkz{ZOf5=(~IQEV_6CV5SO_NYSPmPPjru%+vq z3xCdRO2OnWG3;e^87lOZAQ38F7aSjEkk)fH@)OWN#H-)7G%*V*k1|clDr#cO>y?lv z;pb?5MMY=5vPq?;LTe6mY9;EnFxc+7)FY(pM*Ih$)6c;k!}(^tJ=nJdbgDuVd%PX{ zbZ;=QSedgJ4{^vCS;-LWzE=LzW~cl&Jy8n>>YS#s1TcdGtc;|YGcjg7$3C+ke>!qf zdpbLXpB>hvsZ>^c%LLOC8$8igBr|V)Q%rI#+3%kJhhOY?!q>%BJhN=cq^sO}HgRIG9p=8r zCAMY7iHu~61R3b|Wa+Siw=c?na{rq{nD!_z<&>CL(%bp;-D+%zcMKg|%_{wosk<*S zCwd_T{lq(GEqMI}z@ijbuYGeYF${QCbdCtlS{)zi*Ms!#U$EMH@FzIQ`*TIEu{piL zL4crQCdPE#?<9aUN+Z_GG{FBQSzVJ6UZPar#XYC5{9iYU7h{D`PhFj(8m{@dY*Fr$ zrfN}_S}bK&;xLwSBy@QY52f3&`rw}#q&FG~cM4HmZUDAtT*CB@a;`mf;h3h6HqUn}m?^DI2j3I3Hh_;o)0y2tzbD_Tpt(}gt!j7MR=%7=* z{kJ4B#t<`s7nim}IYAf0QaWT#!cdo4-34k17#9gK4qIe*vVKNn%p})4f%ICe7A<@2 zq%yfwg-{#dbsXjTpg3Onwdug!oqscdXZTHSQE^18{2#N;uYPkDOTcE-DF>a)asa9B z+aY%Kw9#`+h&Jwq&^&&N$qh);)cB*KG&$Pl9j118ak!PyP+3!1@nZLQFT&>szw9ay z?hBqB<5_8;pDPoq;*PfN&Iqu)A5D*Cd7+s1wwrEk2W+j+{OrJ9NooG- z;VbkW82UG>5&W*@b9u-uX_m2hY}F@-p3Np7(;Lwz^D%3h4r4$TSd8@XVUOe8_u)!+ z{Umw@!~N>xvYVCU9`ViU4ny~D1&JP(6@gjN?qglp706NeU`OUiqN}R1{L_SXhIdK`t2khPUR3Gv2Fje^TZd|xK@TWt=(uoap zP{B9&RcK)f_kf0Gi8XtMsSE$DzKvEsmHbwFiLZ7?A4isONUA~Pp!x1(E9D4~++$_x zSGXsV1CKj}cb@_z3%_h!ZZmb{z+EGC8Rn!Yn=3Q*mQ3)mGF-?BpL-xc0 
zV(pXtPAdk@NG&q{WqA&|CFd$$E896o8{UI8;ohtLOX1jkF!Ms9fW0W{Q@uv+;B4J9g@`ytf4H{d9{b>kIu3%m%mIEC-?-UT)n~>G_!q3Cf9hc!UHK z$|6)CIzl^D$BFOXq-_t`9w|gP(W+F^E(t%iI;0g_h8b0N{ttv>Eqt&p-5-z86Wd$^ zq3#)!UjZMm=8?4OL@rPUAL5zDGw3pwD;(Si26X@Gz=Y3V{!-HCsQ8)cmx zW3K?_857&~#}_#Mf*;+vIB`J-C@x#^f`0yvT3ZD`f zJr2ACn+JJLOU~6hZI62WM=P`#q6U_KEydBM0y*;gi@FXl!TTuIJJQqr>|nhvc0+AP zDZ2ZV_B=LVov5%eiSnsfAc69RpocDD4nfAGc>3-ILA7HgRFy6cH;30geSluj-d1}* zzql0!yfi;m+T!JUjr8S!8ay1rjaLHWtD?cLxver1SR;h&^ zH`=au{s-;lP8;ujfHL)~UuFs^y|sG!!c6i!w3t?LQ|HZ^L(<5er*_V{`mK@6k`c#? zEb|QPvITbXMU9eEtvt(tc5{jF_%FBenh9ZHHdtaBXBm0|`V}38f`3)ri1Ek->^sYU zR8Ft1fYrfL&iY4Bl0-T$wqw-2SYcgv)ZnbKIQr2o)>ZPNn$AEcomRwgAkF zL=|L0MkWZ8qn&62osE0p z_4wV^qt-^+X#uVdHw~ao8hAzfOOsEk?iQR&TkQ)?wWhdG;fam1^!-c|E;laEQ+aNo zzFgidOm=pb0buOmesuJRIxJd}u6!rC%39*6-NgDIsdM~J4BTl_x0(Y0_KLx71xL-@ z5J5Wb=O172w<=0XhiOHBumadpL%;bxi9aKJUCVN)xTTFEFa-&q%hR(9on+JMhbJ&dTJ~#bv3ipMaU_=H4$! zVEzZPE6`r=)ae$uA`*y(1hIpAQ>B9?I+kqBN39Eq1k7+x;fPb}#S>uU&~VD^K}tff z^6o7Xf0PKr71NCpl&1$<2k}*I_QkU7zkFlnPP{8By+Ab4%T}Ils8hR7XZ;`UIp`Oa zHpTsm?NPm-8_Wj7uvh&&>u!^0xq6T~LB2XFZABhwc&#k8Z$64p^qf)jE{&7KB&fqkPx!a+#H z6m7FaLGxz3>7xiKkpPa=n0kXBYPHXLa4Da|_;Uz01444q5n{bj^5pC3)Jn>xxCE?E zfk`$31OgdYLc)>>72c6(q=P^#MZBN=(L%NFjQlb8b&Z3u#D94`b+`k#E3V;hC$bB_ zuyIM0cSp>6Dw^m8b?~l1WhRA1fn@mT7IjVdiMH2S;R@}FpJTyi)n$Vs84bUbj<;Nz zVhk!8S(zxczJZ%o_o*1G&i%72rMmEuTqt-%2{|Ok7SZrD_X1k}JdaT+G{wo&(fKzA z0CHnMbg&X0%9}waTX^1gMT|BHM1JLc1^|zU72Y`p@F63CyPoJ3Ncj=@JO$JmxAHuN zK1-B4avAjhKNE1?X`u;MxZSvTth6VLr_Ach1cStP&sjxoX*q6`aUG~0@z?xvxpN)7 zn|)HWs#FusLhbT(xW<7Zh+nSym0{V3Zbsoa=#a}&X*-SX0^Q(0-3>g|>wJW!n542M z+3^4I^`2o(b?v%nLhmBIONWHsJ0d-yhu)<39;ymTmo9_^1On1KNUwqcX$eI_?;u41 z5fQPW`0l*lT6>>;u4|n$e$4W3Tr+u|ImbQjf^F`ZMhHx2M*D)JanSjco1fYdU_t=v zL04#C2C*nCMZtGs9O7(O-o^#5Q#OlJdT}vVtYB>&EZA=>#8VluRJtPA=Sz*~zA z=J7bI8mU;2-Gg#XPiBG`}-68o$Li4&GO7QS^t@w(h% zkM>UW-YS0kj;h$rmai|KlLA>&e&=+_cKms`An9gWJ>0iZjD&f zA$4pFyUCUdO`&0?46k)5-p8G(slesGU(TaHkBU|7tMg7zGrZMq*#c4yr%CS6FBp<6 zBQg{mv+}R;U5)}Tc|b}_JTcVisUIZ)clZaKpK 
z215~P)thg$Zeu*PHnc<#93IHeBN9D;G4-TgS8!3KT&-`s7r(ywyyQLdQ($_QF-gD3 zw~j)=mySeu22>&p0{0M!(mE1D5JQFm8|c3;LgD?a7B<>&>c#;sxW0Wgy&hk0ys#N5 zKZ~oGH4UzqI7fYg>q?(CD{RVIz+(12zpvLaW0~7lgmNNinrej3gK5`*v6Rj zoWsltZX4C3OGW`hW$dfkt>1ib%HKO3lv4C0a1_=Ldfac$QEdYlWUm9>$Tit2G61jai3-%^6&#L{z97WQ}y!P$t zn)I~!AHdHtXWw6uA9!XmzRO4Okq)dz4XwR`sdW|Jv>wdAjpPXV9rTCewCcTIqy9EU z_t&Wtyg_8-MYvq&8xDv1-Bw!J@AZzJA!Dc)*p63T=dW?HRiu84g*t|VE&0FYbj*x< zB?YY>KMl@$K6u0E^7zQ0S@512`>~Ykvrldz3x6?|vx(f0zIPb9dpx9y3K$wfMQBj~ zvnl9lu=^DarN$6x2>>rJk$xU929S0IP{)(d01+8^+yLKLM2Bq$@N2RrNgL}KvCs#B z2N-F&7v!a2e~yffo0+TXuw@s=xLNpH(!YF;?fm1X zqe5O=MjxZhPQ;=9Ut{(bx%gv7ISAiTOaztkf5y66$1OoafqR<92Wo1vn**ZHUpM50 zob*edGdq`Ko?ip0c;cNBnq!1-DN3rdzgfY&|DvH(Y7lF?eCwxjtfW}%;iC}nB&~+( z;&N$U{(#DlgF5DgOnpc}v5IQzejtHnsa=2Z0D0=@B7`sP{@}}FVyo=0q};vMyc*{p z5Itu!v|!7(WbF&$tr2Uod!8ttUsC$ZhMOX2FgCmP*E^2&AdJlW)q?BU(um4~It!=i zhWrKS518?ZiMFCBEG#*ymCgbAQbF{kMOUYoQ%wMZ#jnXCWhlpmVCM5>&gA`K=}XME zVU4RDXhM4MI0sy~vvAP9JYYGu@(It?5j5Ll=dNf_kVIXfEl>TORs9)vJav})yE#44 z4JF~x$7lRpXl}%`d{*7`(+f9PRk+EO28W6A7HT*s{1@WHR2yY4^hEqK#9=~VAI;IO z(9+5bGE;1~F|@D|UCg^1KX684szqni&JZ0a@K<+9&5?n zQ)17K`+=cSo6 zmV<;r1OT=>1t^P6yPAw1tc`caJ4I`E(lhrd4IgIfwJFxz4~shDZue_yrDH-`EY=1s z{6;hfMx8i>4*1+&=D1xvv=N6VHy4v5g*Fo~vR^#7-QSr%iDOfZgIs2Nc zz2D`hcj_#Gg%D|vee&y(b;^M!dd95?(MRn)b?XJ4sejq*6}A~W z)-#Ds!^CBdnluez!-5&MpF@RXogz|TbNtm8q-G^dIy1Oot!g42f=rCYS)Z4k*>({= zEx=>luH!!MsO4B34PfRS-yyc1s5G`Zdr**0q;>LYBVjQ?Q5HKBE>Wul&M=kq^ z|DxX;Ehdzt+%=5!ZN~Vzvjb#kME}zb7+uFOlD9;4`vq?^*mz_PbQ3t>4@Bz;g!8Ml z`XZZXgqBkrZ$HI_=%rO)pWOY9&q;Sz^a9fwI15q;#G->z>A-NCXp#qK)6aAV6~y50 zlHd|_W47`YAQQzlC!`2>hyErc%&5Wuy>~TOHN)esp*DLSg`=_ylY8j0FS!Sl zwN}M(#)Rdx*uHUqnbgj7yC&aKmW@3f^nn>^3&-OIIXl01T!QkG+z?SIG^Guc)u-a8 zzlr_09HnplUp^5oNc5>ZVY%Ty{C@MN>OpOCOvdlSg;F+4n>wbV1F&y>}faZN!ara$ol_#tkU6b&WXJwsE6+C~A7wU)m^low-%I@f0 zhupbzHh!Qx^{Jtvfo1|~^wWpg4#$RQGm!5Teyd|3u{2CaXWBx2jn)%sz0z|U>+l*W z*dhvqj};5;_V>z0$LVzrl9jwSF6_4jGgWrS(@1Z3YU^i+#S7;<(D-Wild zw_*y;&v{A6+JU;&z_2uWnmYiR$||Iyu5!Dppq`p@$9iH&6yTV;N@B+Uj5b(jnl4d` 
zlwUtSS{TMu#Fa~%!&3=;%B!N*&g-E?qTW#=kW2O-_o06q0MaotE*^m_7Ock9SPt+U zw>yXz^2|^VxFxB{v%32EX3J-_j*)esW{<5QqMcdMt`9Rs;xup3g`X(oiC@SBJJMoY>k!bt5S|oMi6ltNGgGTOO z67pQ&4`M?S0N|m6eQ?E{KpwZv%AoOaLJXjqcxX&>v>&qY#3y8Vn=L>9bmCp?}$dz4RwYDYs3s~b%E3uq#8scK{5p_qm?!F5dodTlAY z*wds}W%&K-RL+sJYmjpkUUg+{`&8eHlm+Va(svk!FAWT8>R#g$~!sRX}gFvEL?go>3@fy-HNg?s(`LeVLgVutjb9wQmti z`3+2nZKcluX6X$N%R(lLbE@l=Ic?mG#jQ7u0-bGCHo!_?6 zZtJCmXqK~JNastm)R1XgR&dfP4ILuRZV0pGbc#6we;MWKz*mHn@P&d z*{-v`8Le!>ae3;+*0{Y%q|N7^(&=Mbmq_x2AdQcbW>#<~RXxpZ54w*ALsQ@aal&FZ zS+|#~vfuBv4~%`=9yl%vKwn=&fQN2nE)78I_BVO^;xT+V@g9!q zJz_5-kjk7xs}*O|Bw7?|a>3|Y40A-?q*3P_Vn1XOM{60EuI18@;OgFZc2pb#JOo`q zn#pwb?!Yv{qyBdsR}R0(M}N%dFhsi9aNbw;6sWD-W<=hr?4*=$OEal#t|>v9oU?eB zmUAu!!WPIdB|`HB8J=yzlP*tBxjArBi{f%f@fGts24P#1W(u%mq1d(MPp2~t zz4J8S)f5%SfI>_~L+1T-q+bK79+F?-vJkRWs=Mvr=)uytGW7YB{@Ha_r;qs?j55ZDPXV!S|qd z++CaNQ9c$Ew*(jKY%L|)?EV3e-n-w3*yPGI&TnpQdl=DWbz*_&Il)ONh8`oVxi`!b zM~+XvFBTlnPJT%0*Oz@%B{=we;Te9^`y%7aj|KS}?h$dI2_t2muO3QcauXWMW;w5f zM1cTqSF(+r;Q@s8H?OO_$TktNE=C1RGY|~5ki<_WW;rX&KkSwhw_sk+l zT7m{CT}xAv6ipGBvMOLE2yt$cl&bJi_wrj?S1Xs-_J!me5*AHB2p94zeiKiw-Ym|x$UOtdiM&y!k$5fJ*!#B?Xx+fZ$Yg14m*oLtE2 zWNy@&E?e)eZ0V|4Qi9t42cBBn$H9}@^-te(o!;Oq@^ADVg5L8zNJ}AK(Fw~qO1ST; zie)!#QHIaluau2Sb|kKKcwU~<0Xjl|se{FCi2XtRG#qyHwE67Fns(zt3SKDgOiqC! zU5AnW{k+BWbyU&jFP>iyQ(wQJsPTri>pzk_pN77nry-#L7ESdN>>M#{{ytuwq1YYC z_uX{^QL+zAII>n{)vnsR#eS$?J+yiPzDwFX(Z?7~LNguw@brty`l`L_fp?V#Q~If; z5DN7u%Mca^D8lVS_nh2`vx0IaiF7(||EEwLWIg&{#gL}QW!U>YoLE*hpzdZi_V zX{hNUSlv6eaXRyR$vXD+i`sh_DUnemZlTLg)H>`K)+pR{i0mt`*gFlz;|;lU`8_cN zi0Z!w_(co1QGOmg=QZ3qMUw78^ET((B*m8rWKJ31!O*$UUtkdQPk~P_8;d!|mNC@! zkdpxVPy@yN(&Kqvrk<55k#%%LsY;*4Y=DOek91hsr}zEh%`wVDq^9<#ji>Ubg4Oxs z-CH}a^TC(T8WeKi_evArJYY8;e&-g?uox@dd2S9=IcGmNkG~Y?pLMo7l}lkMJCXM_ zj|TvB=$Y2Dp(pMtZbCLKW|HXOhaAo%q8ru{pnm|#dJh!bV2UW+m9wU@XURs#=(uw! 
z)~Ab&iJnjP09m%f>qNbvE?CBa zu%?77G5-r9d`h%3oLhXRrH6yt%-%E5v4SU<6+KqFR z*ULT-({Q&sV{x0ht@eT(x>tWxG`_bWImM5SO+<|xB~-_-Ra!6=Ng+-Iw@$A74=)Rgd>aFL2~yGB zgw&qYbv2yr;}2dcgz)m##ST?OqS@2rp+YrjMO5D`qrvO0C(mp$g_B5KXq= zLM%D7;oPA<>~oy?vh7+`Z%nX?hsAJed(a_mNLYnF24k5wdB3Jmc-bpyZh7hYv98o2 z7#93^`eOJO64xbGwF^E$UVtV0`)7yLuH_?Av-^2~N>f6P>jjHh+S@O3z)knF^ zhh1hjSv`UeWm$M=FfwqG*qBh(PRNtg7v%CKNbF$;J0a`; zUpXhU5{^t%&6uiT^=jmm1$|o3$tS41H>0}b(S(+AyVQE z>UM-G`rm`N-CKm;){om4p=68j#lzRlF*-|#wo{a>f|J02td=9tQk#SxMvEK6^iot*bxM2Hk2B!cR$PlBG1 zS`Mk4#YV^^MVz`+=QQY9rT171R;O?_Q!xabNhgiQaEuxu<4dE%|`SV}+nwb8N4ja>g5{a01; zjry}sXs*2cMvn(()Ag$4onpXbRuDG;xAyHCGsx{dipS--cg+of7ktAcn7ZfbGA|*b z35M@m7sqGCtj%ql&?k!L*Z??l^D{j+fwu)5YCBN~Zh5^&8A6h0i)4j1>K%?xYDf?J zwP0F8eA|E`^bU0_yVX^X#khD6d}*vT0>o}@n~h(dEjc&tyCa%Xc+c;uyv7Tvz zhC{kST+Pox&Q4VWlE+138H@w%gSNU{Cx={&xO-y4LMo4u77AQdg}3&sP8&FsggeF9 zls}cub1RL=PTkukNt8SFqhz1rza%>o2(HZRt1ZUyx0+izp|6tUDlzsK35yV_kZc2iwD0=(Oq$MV&;a@j~OPkTp#GS&AX{-4)r&w{V1P zShR64Jor;S$I9~dS(>hMFCq_?s(fJtRwtoho(#yr2r9~oNTw$+J`hv^ND}*+gW}`K z;t63>fK)E3J(|MV31L!`ZKu$yqTQ4BuwaaN3G>(*}bjAAf)zTeYZD@ zNLEEdG@$KFevuom+hqjmkkqpCTeIZ|sONd{^rOTL$Fn;%Wkp}pYeqiCJZ`amS}5|( z;OQ31IXn^Vns0BbZKg{z;rRfAW-v4!5Y+=FLa`BBycQ)gY zljhSqsYWMZs*V3Hl_^hGO_xR2QPAL9flr|p=-{H@VuT`Y!&0lYZDs+fjFZn=e8bx; zO&zI?Bg#z)jl;Y(|D=bOG|=y`%d)`DM4Vu29lT?};u(vy_Dv=8^$n8&F~{Ls)zdXO zD44{%V!f>cjuIy|*2A~sYvZi>D1VVv0f*GrkSMt?typgogCwbOiJmaeNlRv&x67pvPi)S&W^i`%%g-5K-FjJjP6qfTOXo) z^NI(e4Wdd+>&zlPm(e`jImn4tYZ$sn`VKk)buF)lR z>(9Ebbml0&&`}Y=E~+vT zb+o(Al_)&WmwBU@36?Fm)7IMRrx4B>f!U$KAFB>T$b?VruIF>Z><#X!CY zuq!uX%ftOkmmHnhMNi(m@n&4smkNB`FS*6F(qg}1ia*OCm`j+l=|?tj!3nt;f9t*< z`~GA$bTA#gpHJKO&3tgnt(|)T@(vA0SaaC|A^I4#3YG7?!P}5^9P|wV zj=0n`NGADYgz)0b?ioAsB!g$@7J6Tf&FjVkSUEbM(KPsj6?_DEscRK5j(|i-o+=$> zZ@oNd&3BiI1ibioZ8+-RFs%#>F91xUUJ&0-wI9^RRpTLoC}spwumQ~v@kIvtT4aKX zLh)>bbQ(B-h!mc{D^#v$mPk*xA4u>^{I8Km?~glJqlgsiQz`;D8t8g-hanJ=|c zZ^U)P6bQ=O7z6YO`^cjNybeg13-DyV;8f=EWWhrY(EaQ|S?ipnnJ>OKp@>f&i^{J| 
zl@=Q!jq?IZ-gx=9@T5dW-^rg^FbaxzxZFu{GkyIcBSqzn{9%OUOVRx>?}*@wNPUf2 z{t6peiBVaB>wprb8oI1_b99{K-iyIi#@NT3-G@vQ~QfkoztsKh-s}!^}#o;lF}jKVs>+3V#sI4OqxN2cK(}mSB72(J zDOX+qQSoVJrU_AZn&0>~utLD}bRXa<3;>1!m{OaeT!9_Cyx?SJek~vX7;3UJHmv(! z#cLTy`G42)zl!$v0+q2`SE%>dA4b4qlKMB=&v-8}I*f;|Fam+${ZTu6<#=povocE9~J;?naU{poD)7B`|FqG(7`S4 zpPgn^DW7NMJLA7bBio~*{1k4PDLncVo6r1lxwg?VG|p62HYljDQQ0R#h$C;CHR2lm zuznzN*?Me9H^q)p6LT}m3m5<>6XyS)s^@-4>20Ax+Xe5A7~CWH;mfQTL>2pQ(-z6u zCOP9#G4f@EoS6?@!vjVl=|Vk0Q9gChs?cX6)lnea?QVHLQ}j_W>dpv~`=^1i0KrGz ziH~%&zuDm(JtDT_Gh2dRw8wz12q>cCAApdaASOEhSh$4KojB9Bt_kLE0a-glfpgz8 z{JrH$nJG3F#Blz|vdZ*vz1~e0*w=OSYV+AI-SJ=Q*pz}L>gXCL|JKFN|H-;s!R)kMW_Ew##+E+7CT#QB{nsjj}Dy;tafWb5Kq1+rdVP)}J&ONv=xxp13e{ zeg~^Q@`9A;zkmqd`~>sMJE=4YTSA&h8t(ao8y!wq`@TUbeFvH%R2pGs6*VSib_=CB z4_45qB{tSNM3u(Nq=lT7)@MB6U_Y$gPU!>7Ye(@0kA;D~+tXC5sG?1ZZ4S@MR+vsV zfu$Y~YF8wg<|Af|Q8;=6ekkA0ciGdZ$wRb~9%5O`C^8UZu z!vDED1du25DpY)wf8hbPL(Tfbq3zOuXGB^ws@eb9CGbts1;mR{Exop~#U|`zf$=vM zF7Br?I{lz4qRJyNeie|ANEEKioV~sW_LSghobnqW#s^9AY*UN9YkRn~sdQ!Zho{@$ zznhik&mEQU`HN2h$tdu%lUU6$n8e65hS$Q$vrNCr`I-3<(s{B6snP6ePyxF1RyJSjb-2342!Yb0T?Awjz-Ef6O zBlSPuq*D;)cIN>De}Ii%l9KLa%JLxqwFD*VglOu#&_32sB+{e*O8A3jPgORj)Y1so z!}MQ^uX4ahftk<FWf~Sn_|TeM%%bd!kVM(~ZPS)#*s$b*{0^g3vdxK_$F)>9Powlg2Hu4; zi#+P+1+G^_zx=MjS?2O|`X2z_=J&GHgYXH}Z{@f99%bMb`D4&_AgZQUQfssi{*qia zkCibB;ns=Dzxd40!9p7btNTf%nnj)`Oz$f&5dxglP2^i_Ru50VILt*_%U=|jsK{U3OzrW77zoVkzo(FkvK z4czoXpjNV#G|-9H{m=s_s3J~IvQ%2(D$~?cIk`4@n%R|yH_eYS*(d1RQ-0kGD3)lg z%?R>7getlsybN>$To5lG)7f4XTNd_{Ym26Jq*X$XNBw6`?Db)`;`(8t1v(zjYcceFM43alcIs039D|o93kpI z+_xK`bp|Yc%p6un&jorazkoY{ z?dcNvggN|NGZ7g7G?WqG7D`xPTgY;>RTB}`+;#vh%WhQq|7isOeF1E85eRyWa@Qi< z_l5c?#cZWLG6<|! 
zE{n?IwYZ+%mDfGb0p@wr8NVx_T|uV1tJXX%@Z_XP!lr1x{%_yQx-BcIUm+FPx*8<~ z(fzp}YF?(MFN-)uHY->;MZZzsQe@FqmTWl}mSKX=_CDQO$sd|I)nD{RT33b2(-T74 z0MREk55?A2$t4jkM(OzQ(rw;}hcUi2&zmm-gx#Jrs>E_k z5r=bl(gOXm>7OqW%{hmUnU``f5h2@7-$VATX zzo03M@GfzZ?Nyq3^9rEj*-N-z;*Ic*$AMi@2Mw^aD4eWdEeRsL)82!Fl51sFU;p@>#r zDmm%orW$ABE(2!T-=)DpwAG6>E>Ao_HL-1cI;`H7rjj3=qOfj7%XJY!$xyIGKUy<= zYop9>{YtjDlX$*PY^*l?= zYeVvwqQOMHikzG!N={{RjV_wA(jzs*Yb4uneVZwSJX|+?(ff-hONt;x9a7It5UPNX z^Q|}IZ-)?z7ABR>ef_T3FrJs0v~if$YTw&m6~wcyqVH^6T7r|zUOLZt(wg&4Z~R86 z4|3Tds;?k0AnS)ZGWrJ)TJJC0-7KPSmTOf!T`vNYo_v0&wM=*Rh>y;JZw~g(&cvl) zv@h17=adR$ufn4`ts%vN2v25VzbzwPn z%Ff9u{gv&^g$08Qdl75t{LkO_m_U5%FuIb#Fa{kkH#X0Os6cCO6ttHMOmmecq`T_S z1D=mhVd%GawKy`O2XT9(^973MhX36L{-+bLIgaFS$H57|Q^J#KL!lF6LxSu~o(b=p z>|_X{(Ya>=7{~&cDHBNEh-xmOGn3CrjbShwANfiG$|CEx3tTEev5=;h>*=Ye35AM; zSk|VZUN=^DoCO#dSA8>+`onE^YUGF}_|kr_Np7|Z!Dnx_32o*}fX@N~7~bAHiV*#( zJ+}%N>@KPk#K{yk+~N`h5D`!U0MHzzCv|cei8BjXf?F1>*ZP6_!$*qS6}*pF^2SR_ z=Vn90m%EIpRv&Ig&9$g6E^ceq(Q;U}{??0B<6>489e1U}!A;yY4@4L2o$vfqcC#4Y zI=0TY4>-@rUjET6=Y0`2kOwn6`D>bP5;TOVaGG6m#zr!pl5Hl4sS8M2-l)#1GgwQS zj@aPN!CLm}D49sBVAk<*lo1=pB(%2z&eQ-a<{4@7L>weAEk9zbti2$HXKG{hXT|je zLRicrhpU~qZ}TReqjt9e+1zKtFZ)dI zFV(9TC%xEvV(Lz_K|590VDP%i>Fk8xgt~NE!$6AhL;Gg%L;&KWo>g7=9HLYuPDGzO z1EQFR{-ttYJ#50PKcxy%6XQ@}*NG1#2WO!y>LQG$5X7x0FL62NNhZlsJbn;J2OlPq z84z-A$C-+jo3VK5*OV3^9<)I;wLWI$pW2ErffZ6Cv}^Fpro!O_i4?+hc~nIp1~;h- z&pOp<oT3U3brx zr6-mr?p|m`^2~kMa6>DWYw5M@U7^#XL2HZO`37Iz?=6;WtC5AoH4>c6w5+^@hNJTs zt5+;ut0(D@%Ld(o$2?F7g&pET!;a$cG7&u@V<|^iGV`U4BOtp|Hl%V{_ZRoxFGEsx z;ah#*$B)#Q7S5SLtw{stBDcULRH!GPvCvyj=b1%KWSC0pK8&iBd%pMgEqH7W{`~fP0L5l5{n)=?0sx@j zl({wrw($qc9A-3jTZQ(k)b$TeH-se(pO8i!E(RZQJFwr+C%A)l6?dwC(y+G-39+p3 z_gWDh&jf)ROvZ=VE9(d3QeX92BU`YnakCv)UbbAtmMan^{&)Kik$uFAyTX49BS3TJ zIq!X{qicd^Rw}nmjn?M6v%DJoa=*#O3a}!dXUndi3SXyNq5Jds^g0^$qugxvHu~O~ z^h8Z$zZ;RR3u)7-tIIkdq-Z8WXP!$m?)Ke~PxWX@9F4O!qNfXuFUyl2$<)Wf`qGxw+FU*;jiA=w?Eg9{d&F@)do#^j1XVbyIma z6I%63on3@o8EaoI2-rc9!qm`dJOvVOqdk^uY>ZC)KX7@l83dZ*EUfuF|5gh*?8g4= 
zP6~V>D=Zs!?U`w+A?ZiCSO|8(rudq?Y$!F-pizMND`renI6yNZwiUn9hh~-*E7xOB zzEPQ6w+mEuHn0wfbD&CC!SoN>=@3r*0`&uwD+3*1c{56KVGGoCu`po1a`_AwI04xO zL;&8!W8z8Ie8cUKbf`C=XJb(A^tQNK)S|Ce0sqnnKxiHoIkO2^xXN zugEE6Eh_Jy1+e_m<$P7N890<+lHDy?4)dyFa7N6nUqRr!s zW${vVnM|d9Zaq~e@f)j(yrw4a@|6Om`k>7={8>#(W+pKB2VdI+i=FBheU?!TcM#C0 zt#<{vpC1vkwtXEj?~uPBio=1z^6bOhl#)b*%d{2!Y3*ARtr`e+4S%vP6dsp*{q)>n ztE&lfF*4qEOm+&KknH0UKf7RwV?PecJj6vbT%yub3E?bN&<{q*92Urs^ET`s^*gmy zzw?=McEdh7XFs70?)?XF=&(3$KNzx3okIUY=k+9XE=lQE`>_$gSBI2^^ zZ0`B`G&#eB$vY*=Iy|CRtlJJp&?VBz!VdB^bdY|%Ec$ucU(q$^AqrQQ;m1M`&4%WR zURMjoEPk6l)OpeJ2N5(iPq6Oz@6Hnc-{=47RPXJZ4~1U#aTq1B1WD2uG9Gs691eTH zGjz0%Wx@0Ng7Wta|Fcpd@X&$ch+UFoBVCC5`((3ui{+}aZ$8+Q#Z8a#q|j)C8vRGh z=g_49XSMqRb+wrU0Th8Yr1+VW6>6K#g6vv)Ax$t}gD*(oRix0R>Pa*oQ>_{ozZpC{ z?-!YU`Y5*SsWRV$(mNXGm_*L+w|fYRK|-q}y2B4(nSBe_k*_MUqxnAF&dmS)h`R*R zV0yJXqTnPQg;4BIcCIekS_+C9=ZaM6C+n`Q{=}T@X+^4=H%2$hIa3+QrE2SsjaJLD zIe1&#*(L=4_&0!JQg}UpDw^q}>$68C)zm_Z+KBvZd~Mf>u!&({>CBaEXQ+>t-8PdK zkBY)GGHSB+$9q96wi+~xpu3?=C%dB0d)j7$jUYz=WQSVQ&R8E}^O-2^rwAV**E$P1 zCqo+Z>Q+CnXJ5iwWy#bi-_x*uhLmd{+CRdpw%SM34kZ@JyFS^r&@Wv-P%NfinY^IA z<(LuM;jH-6)yV&c`b)-2xylo z7%GIf3+q=RK9L~y(t66+yF9DYnys%3_KQJl1@roHiKhEc7iK+Mv*H(4Nubf3qAw$0 zLZOto{u%zqX&w7|ZWi_^eYQcRu&jP9gq_|JCGno;bmA>d_LmRDosKWvw-@#OSsRk< z^yRL-QlVQ`-VIGEOid!A{UsG_>P>&r7kc~#Z`*>FVs&Rn!@ntAIDyU5Ng9V)467R^ zz-I?vs9H|mUx!9mZT))SL!JTAFI5dFEV%cc`If^k!2l$ANGB^0*QbWMRZgAjDl#ow zPw@JcwbdgLELDGnqTXdtIS^r4?gyh$XWMD2OsLeFGSj%<&V2;(p0w%%ZU6nuAg0e- z^(maP?XfA_6T6|z6_4$f+3a>i(gB!m)I^l|8@uXt~uo;sUD~PV%>%7be#x`Ue1P10AE99gmYRR&;Fl6Y`GC|oawE)0vz@se6 zSc+QSbf&6E_fd=(r{9O}Pu>BdvZeF3HKvflJ2mKse&>fL$U=Snp$NULP7Zi~vm+tj zm95B3aP1Z;KvJ5lh?kJg`7dROy7H3+J>~iq-ABr%ciEwO4()ZE*bnUmWq~W}-Rme= zb1l+^q8PEOBzaiuNtpbFae)JE=lXeh9)xGaOg@2v0p6%2`y#NHU zGgZ5>bBJuw2LGlyo0IG8RIot+gK0lD$Ywr+1+QvxA@6&LF;nHJBlfG?8JX`&I(o)` z&N`4}+vvw~k<=5#ndqb`WCw`W3V-A>Dp3AKf&uF+-FK)R(-4c88keh2di0L zuQXFc0)E@z-w~UAQjR*DA?R1wU)R`GS=(s5y*qZGnj@R>%5=H5S5|hJkp3cAz3F>P 
zl_}aF{22vSWCX8o9o2pM?Ec}PiZefq$s@2MUnteEHN(1w%Y{5rv5s@&Lm{2%05gY; zVLNw>GpQJT?P`P5;5p7lK8qW9e!Vw=h=^1)JIVdV5bQU3TVKJyTkto5L)(B8Ht(}2 zMl;HTReba`pUNLAARjvFf&H$CaqB|!c)f3`{;-}vH<1$Av;ZN!c14OLt7(|?T-@8_ zl>Sz&Q=1T)zS&y%;tQML(ox0mJ^|%o;S4(C_nqPTW0z@idQ`Kr1rJ0GT@%*nt?Zla z-$wXRk_M*=(pGD&qYn|XFB?-70uJ9bG&U%uCG2vl9JCz{ti4;Xs{g1Y6#TUOFl(he zYrCVBu9<+BL_c6UVCipXsu8|0Lb`XP`;A{e{TS6|S;H#QUnZ4i^R*imkoJyeuCa3Q zq(*3ye6DA6kXH*p4&T$R{tS`LNTy-a1QgsNSaHgvw2^%E){(T`M6j29G=KgalTpv)!xQMA*om0R%oAmr=Owvqj;EAo>esZj*JM3?l3B1bTi_;TDV|a-1GJa^H>r`3U-17w$nxam(^#<#dmZ<+ zrT~CtFQ+bD*=7g+29`9vO7bOlHgg%icsK=s|Fd z&9scXQNdOBLJlKUc*%asSD09p-M=l#VD&@6^M29BODuTAA+z?pljpri^@9H_m-R_w z_#_R~ZVEt^VGvwqTUGXy$ufBAI58}{724ihGhnMf?^r;?SKvo>AU63^Tr6hCYD-W< zev4=2Czwvoxd`P8W!Pq@!uu=mCDAJ3Ek-hp>az;g)mR-$b{g*`So?KL;2$vmXwo$C zkicV0OrR$TsA5Tmu z>cg@>jTC^X==C*(`uN>Tc&j0IAMv;kzP|aW(zEgxF@o; zT?mkzn){N&PpNorMPxIE2mq*o$gr9PicKeDv5PoHKq^ zDWZz?l!zbcTGM;00Z-08@k6-qaQ!jbPM}QMty?~Sz}ooNZO6sdLj%6(|dPpaU7KI(K}*-L3@CLRD#m0x(1sie~Nx9%dMoczK_N9fXu z_t11NgFHd~A|uM)6B?I$o7Jy-3vN<`y|I-)x_@ZX6;5oV`Q9p~x4$unA-Ez#mM`J5Nj*Ls@)^^5a6BM~RC?U)4jn)Jj?pt@-g3U0S<< zghXIPJ6{UmcYFj8j-e+Z)*_{0;a?1p@aiQr7ZNL5NFnsuY2k&H_+e|d@kWPm$B=N2L+~>H%^X&2yy|Hx-~EKt zmN*%2d~}AVt!*_{!=Df^wMkp=C|t`_S{ncogtqwY5R%0gw+7fPU>5NIRn&LKHIa3J zP7=BVfe;7*B=l;45I~A-1VTr8H9)A+n+T|=K@=&1kWd7qNKrzQF2xd*jv`elK~Vz& zi!956Ad34&_j~W1Kjt@?$s~8qJ@?$x&+>Z(r?_cX?_OUCUE+rx(i{8yQSGW{!!M`l zPqA6GNSPCJb-6Kh#6`{fNQAqLy;5 z(hIQfsDfyCBCz=O)WDU2OodTtr}r8V9g&ndm7{{3+y*BKJFPk;1d83d&d^w_gUT0O zmM!a)b*rX^E7p|`YP-U<)iaPY-oBpt+ZoOepDQ`pcEcp)PdXlzGZc8bzw7&t2m$Bh z->Ix#q?T9)7yRtlO%8eli^ILQ-%(c2ZP}s-$?4?lwx1RzF3NQ%6n`wr%oxr%Sbp6C z%W!sfc?gEXlJERYLmgrVhH}#fO|{OB=NewjaVhC85N#jr%`A(q21UlTM_7TWW7u<` zF>M~}u#6`5XkEHHv$knw9OtH46plQ0E(H7Cean^~*S>v5IM=2e?9wWZ6o<*@>a#O+jP*XVD}o6v3PQvSO#;>q+)KbP)wR?TVq z^KqLCx08!1?DD2MD|IjYO~)eiR7}4$-wm~wqk5_@l(Bt?2^w*|6RHtn^@2wg6Wvo8A!(i|X-LPIIjLjq&)R=4(}z$G><- 
zs4AgV9z4bFEyo6^XkGf_nzrA0h3z!w0Fltc!j$>>L(5xwEmB@;MSHzgwRM%Sp%Zb&1B4!MvbR5k+Cs0RNTGu+k5fuuOVwV|=0(G+^%OpZ2tDx6Rd6 zxyZMZbFO2c%ufPOy-rhh2&e5tCpo&%WEbR{wnC9J+rF_x5`1TSe(&6(#OW-dwScR% zS9xaK4Nr9ScV-+@4je{SFhP^l5!@O2Xzc0v^GYt>PT^`kqn4#UD!JC;p~h>}eccBl z+~N6f2~meqRuR@xOXjQ^i@Q3oD`=H;u`#N1tay-g4wqO) zR#BpI^CdEKKTWd^3gokk+e;|xjv0HLsMZn-?+o-naD9`~RK|u0-K;spJi`7F#&>5c zQQE5zoAS=L1EM=xj#AdR?D0Lh;;rCWTk1Ri4K1GN6+%qSXmbc?jjW6n;WC)+Y=J3G z6tgDr)8~|UoGZG{8vHP}cT;ZXUP8D z<~bi;iJ~&Z3i>?g<@y(x_^i#^vsm1dgMY=R( z8jMpYU`XK6LGQ5px{9Oz^?F5}QG~cy#pgQ3xZDT!pVn?iJiv?9?AdJQ zOsx_>Znn>cx;>k8y3kU4Afu$c z{n}#9Z}#SW&$Nt(dM`uZJNo|ud_6)n*uMU^;)?HS_k|otVy0A2xSx=~wFiFYQ=T?) z_p0nFg)+=}|8#v|s(*ijqb1jeQ%PUe=fRAbeOGk{^H^Y|<4KbRo7&6L%E))6_y4@C zM$@7n>*=l2 z02IKQccnqz!jVNS|BosB?}$cH;(fAD_ea=f%l~8)lhCq-xXeyyyx}M2_q0^bu08(A zPPr(4WCxS_-xDy)PhSBGULwEMi0pWp3hpMt#6MFS78t{t$dSuPlF4Xm1UQXYZD+;% zHBAC+YxrL<1!9S3++H-9y}GsJQdFXhX85UNkA1jBvyC}i*QkM&~^mrLz-ajl3s&R?lT^zF7sfdoB7G^)&#A z-1C`#tb6_o#ISZ1LuBl|a5iOzP63Mg{cVSogM=A}7x76RUpnd_Ug`D?c=<=yh@?D{{jx* ztXH!tY=yycmL@gBTWOif8Dcio9-;TH@1DC!@4w7=WE8377HaEQpYvK0k#P`k;$dvx z4@;`+KL}WZop)_e&16+q_+5>^ZS;379yTKl9Bm~nK{qYqSaHdTXd#oj*Qopwr}s4z-PQoYSokqW zhD;L-4C*ugyfbp3NguGeahX5yjdm3JNM z#aA*!tsFg$YWP$&JQI2C6sH%GSQ(b^@#36C_}tYTQ>5}iNzU8-g!jqj{buZDLT&rc z)$?>uEgpN?cOamwv;%wfz)@Gvce6=%O)Xb(ytb4di9BbLQa(8}VV~dO<(2PB+Dd4q z-O8;@xoy_3yl8ZU_0Q?c7vCEi-ZZOqUQ^O3Mepe~y)K@78yn)apIF+jy#1%_D=WHR zLqCqhU+IjSrl}}K3BRyn0;9neZud>9sG~fIx4Ay&5fe3ex@RP-kH1XOJCB^R;pa>{nL)V;^1g|*WyyCYD@5ghYrOD z-m|YlPI_dTrCiYKeiv?q6==BZ{{Y?-_WiEO|JEU-{RZdH?g3wD=(b59d zzksV|G|MgG87zv_BIysz3y#WtSPD&n<|jxoC+CE+?T@KOT6}ytxYT|6WpcImwH)r9 z!67r1UQ<#JlQEL?<5xgq&!tT8qJOfTN*SK|@_ zDjY)Va%s`bsl=->@ZeXZSAMWO=jX#TT1*dj=$Rygx=eKn0E|756>+5UfM@>?_~Gyv zC|O6qt#cV63$>~Eg#U&ignYcyA&q@;4rV5lqhsNYi0gxeH-nGBfis;_J1oGw!rkIK z@Ql@F(`ZnkM3c)X7!heQHO*N{sUNw?F02#)hr#f$ggAaNhnHcY2%d^RLf4faBU3+yT67QmJ6JGB_Tj&AbUbh9h%vG2LYYr))z!L(eaKRr903H1B8E zVNP=RtmEOI=_5_o+BEaLkM0l4e1PF^lfAAbD&hdgw<^k1UG 
zmxlnY{QGTBoCE&_zKM3c*YQ=-nr08w65dx*d|H{c(jdut!=39CJuT$wNEGTCnzrk+ zG%t@k6>F!sCy{+O2TOL&F4L9&a@&BiV3n)#RkA6Zzp84p7sM0l1QKdyv#z?q~Zq)fv5=CpC26JK(ColQ^U95PzXnyY7?Kbs0~ zJ$y+2C{OYZjL`IZA695U`llxJfvDn4{B4o-IMb@LN2V_2G@O-Crv|k}57cIi>iO6k zRulDkJ{D`{nOsv21A8(fY6Dlwba|HEyn1bS*mSPQ_D{M}y#E&szhHfQJEe`(3*HR(gm*8(|W~Q6)gAW`X zxm1`RAJ*DU%54|KvS$UV`;gKr6a_7eP;br_u#lx0~>H81hgQH+9ZBA96i@%;yDL7Q zXi*{p>DfUryhWuQo&nY+-~8vE`8@r^Oi!D|XWucpY=(Npm*$^&*+?VE*UQ2_a@>%GDTFM%ApU!?ROz+idX>_amZX6rq^ zK=%Qc%ba8vG{g-p`6B~!*3ifx&!8^5OIgcUS}sY@y)%O<2TZgiSoewL($`CV`~#R#?aZfYuR;Y?4(M+S1qMZ-Push(oah~UFQe|?eWV#G z+FrwIY;Z0PUi=X8GShOz=l+GDD++&>f1tJ%>1SE4&vm>`NO&h6JYv2qj*>LeM}{2ZYX>y&9~fG zEcvloRg+S!i>(aK8nl0kYCM>I@%m~ztp2XY!f@l7uXMOQ zVBJN;_O29I0n^))Ez=5LZGm-jbPV2y^ZzUU9>cv3c8NN-*=|U)lrwqP{)5<8{@%k9}bkCK{8@ziUSLz)!;Wq z(!k26zA?Cacx{4qG;2VLw-Z>xmT$K4yY9*th zY4)u!G~exgm&$A?GhJP}zLZEt4?)2iNUkWsnTfP#9JbR=PBsE}C5=s>{^#-jh6K2h z!EDl?U>C>V8+coX9JB+3SL!?ptOF~RQ3G4DEz2hFEBcw#=cbDN3$zTam^Y#DMXdl- zNoO@uaeU+|(QWMA%Qs6Gb~W^=O`{F{!*R>PNN>83naLEm(Xwe{z>Oq{0&w}lENydx z{5W^rCRbGKODolF;&ByU?A}#879AA{mJdnDo1Hgbsa?pM3-8j@%ZxI20w7g@c;+@# z2|rVau|F*he`&~o1nOt4tSf@4|I%}nf#phh#wFSj=s0_YKQpsKxj*0%OIs^L_W&jw05n*rD; z488RiFF-Bvxenb_JM&H8zBR$Ys>>wF#q)*405@cbgAMjealsFEW@FQPHpL1tc$XEi zIq>)`9{y?gd*L1?B&7G9=s~DQEaTj6S=Lm*I)?!x41>_9s`lBab1iej0UCeac z46Ik6S|%V`F$IA~feCF_;KEQkuaMjtnv;P%O3kJ!VGwdfb5Yinu}*;!Y;R2+HrU!s zx{kAM&hofQ&Kl#!wJZMiLs(uZ*YoRj)wE?t`|HG@UAiJgrB5%LMtpHYKZ31pXfXu0 zjJsmuhPgFQYQep`NU37Dd(V8xAul6(?Iw>PhwgA=B$6S5jK+&Ry$&#b6Q`X*G22Ps zpx+LN-*gDEx@HMFG!>uOH`C#ReY5b7(fiHCC}6Wc_xMBl?22Dc8IKQfN5G zh}z3Wk;)A#`4E#+f>jMZ#!J6t?qi0b*neh8Uxz`xPu(+UuDISKl$Lnk0~5V_IbGhM z!;V)km8>+FNID(C*QCm8|@vPnI1Q$9t5^=2j1j^Xv;QlTRt?#!rZ ztZ!%JUEk(ZJ&a^gAN+Y$w#XkIg;Dftq78?H)+(0NYl9``XT8og9dHbYp(1^r+%6B3 zt2-}7^UnqV!p4^?*R)c60!==(&vHZF^($d(Dq=(e>-;1Ogf&oyBlA7EYgNZu_Mfxil;#fjYfazZwi<@Hqi7PF0p%GfvIMhiy&Ur~>}p#ut$-tpF8I9( zSo1xz!Sf{#q0)hOIUeKf;G8r|xTdBmdpv&pLzU$PT#~S_T;%H%wr{BJTjk4cp~_21 zKC{;178o;sm>6BM1hIVkm@M@$*wbYTO@7_N8fXyl0G27TC8(Aa_FlN 
zVZ&?U3zm0du)TncO0(%zAbzH{iL}>A&Tq^shQXiyAE#K8vh zG+SJvBom>L5U-VHx0oYnv)png5v&t}TnNB<<`6cw!0r#d)P0lNOu{YgdiI%4!KN)p z6u9g+04X8)9*C;?z_vLoi_fq2aO^DDHWl<4{g1-fjj5@(UzOs}zR?K=brcn* z5oTs63~*A>GPp*FLY(<5h38hhT8!0$A%1qtg(mZ(8&a%G5%w~c%i+g3xWSUUosCY~ z@_n<{GE%{TK>6~7mT0Eu9B%5GPVAINGwX+I`)xE9MJ=(oRy57DU08}#oCj|qaNY01 z{#9Xu+=J-v`hq{`+%SHR7|+3LErt7(=gX6%T+?<#Xpp9S;fMw=XCNe+d`(5A?oNrI zYi8Ct^WS*pkjm9y?}PD!GXjL$PTJ8AIEX}Ivdo18YaWU(ihVE^LxizZN4htx!YNQ! zw5OZ;QHpI%lK+uWSM93C#(mtXr(6Ox=K=Op@U;QA$UqO9g6JK#hxw9H+;iJj))7nJ zn)qjWrHlLDF0pLM7jhi|Ry}*2=<)L$i2oKJC_V^igi~nn%3m z5-6I+hZPv3R?Oe^gFV%spXfDNBpvbW>%X*%_@>M;f$fz~joC=Y?+GaN_*a2#Yb2WV z#TU$9)Bqd8d>k#Oq>oeWk~g(gNNgMO+DYz7s-Mf~C0GT6L+fT`!henl@P&EOTn4z) zOpapqJLRvDqLA4D@;6d`AK;*`F@_vVd?2XgzQexhoWz3o#~rzfAuu3@1;9R={6Hf^ zjTuaeLv)WQj#NbFP0(CeYxFy$&*3_;^#V@}BZB-aip%9H@nHdS!R+!{6a?2otEh^4 zmwCEKjqhz$GSu2P0E$W>OqiPoDXCgK>#Py2 zzciM9fxxP=7zQZ8A$~_SVE~wLi~e`MEBDl8g9tF+V{Cwa8wPAnX%hoD4~dfNlKpRt zNc2I-{J%KK5I;PXuVzc#ZJo{Kyx;lrC}GDP7J6|(@hZ;XWJ00KGiY$q@oVH^vg1%}uQHKXE<_9EJn8K|r7-@vZoFAV{r_!| z(4~hlUuo8LISyNbJGmSEY6MX*^8`fN|3GZY+cJ=2=lx_h@)F-_L20E6?m@Q8Iu}`< zn|%Nh@xmL5hFlwxN>W4V`rq}RnEfY1e77=~)TA7#4H_0>h^)2b4S}$!7UrsO)vSd5RP0vzfn zWcm_e&d3+Q=@kHqivvqU-m&_qsvzrDF?ID{>4+devxg;(2^;{!_%>Yw%1S<8N^s?~ zre70ON(3k~H9AKZblyph1oJY|-;3n`+Wew#mj2yauYrUS? 
zqOb=0ZzjG~BKLvJW56%?S+F(@QOzJ4= z;)+?~j*irpc4JLgG7G^fkx-pjWeuk34Ec2Q(saN$)vambTJ1r_j8mv1x<^BMLvc*4 z=C3PIK~L-T)B9;vJ)$GyZaia3e+_q=I(=mQ<>&9)&ZliFU4nc`0IT9y33hdi$T%e8 zV=a{mEsIGa2n+K9d>1FTp_^hq1nWTi1asN+FkTSztVpvt1Mn?GGiaDM?1fVonrkfw zLhVwTP^X54w5>Y(7#u~3R6LB@B2|~yWq`W^xOA{Nj+rnl53T=<$`nPOWKU&)jRSw- z6@dPmLVRd*pCubgeAi-ec|{}y=n9rdv_oPLqeb174v``!;x2T}~*;m!C3SSPp2 zhvup&ksl&AN1+LR-`SHez)&A>!$qI|y&|7@sV5eeBOz2aMHB8AdpTb!<7YmNg8936 zp30Nk6?`uEqN{b&H&pz2uM0((0P1=7&Nq#hMOW|`8yWNXQo9Eg$w1GoJD=FuK+;-X zvDlR)02L$@7C0hUdxWDYLIq$q3Lo~Q0?_{jU$J9Fvwu+`4`~*FJM6W65(AU~VZi#=29Q1zB?91Ti2!hi z0^|<^FdPMLA_e@SBn}1-$j)H5>7ew*0Mh+mk23%;a*F~|=n?=Uv!l(DnxGX&ZerN# z(7eFP1TVlDMP?V($sqv{S;sRTfU0GLO7mSXajwJtu?BrOiosjw_NqPmQK6Q|b-;ek zL7>_!%$3OiUO6$NXiPxLg94ejJ}fOb1aP-pCQV_4T@6Gz$>uT!@CVolzc2nigj