From 0e8a2ae701194aed7a7f5d6dc0c727ed0399fab1 Mon Sep 17 00:00:00 2001 From: Eric_lai Date: Mon, 2 Aug 2021 17:02:39 +0800 Subject: [PATCH] Update TensorLayer3.0 --- .codacy.yaml | 2 +- CHANGELOG.md | 112 +- LICENSE.rst | 4 +- Makefile | 13 +- README.md | 71 +- README.rst | 38 +- docs/index.rst | 2 + docs/modules/activation.rst | 79 +- docs/modules/app.rst | 10 - docs/modules/cost.rst | 9 +- docs/modules/dataflow.rst | 79 + docs/modules/initializers.rst | 5 + docs/modules/layers.rst | 57 +- docs/modules/models.rst | 49 +- docs/modules/optimizers.rst | 13 + docs/modules/vision.rst | 204 +++ docs/user/contributing.rst | 10 +- docs/user/examples.rst | 38 +- docs/user/get_start_advance.rst | 218 ++- docs/user/get_start_model.rst | 210 +-- docs/user/installation.rst | 49 +- .../basic_tutorials/tutorial_LayerList.py | 75 +- .../tutorial_SequentialLayer.py | 46 + ...torial_automatic_inference_input _shape.py | 95 + ...tutorial_cifar10_cnn_mindspore_backend.py} | 51 +- .../tutorial_cifar10_cnn_paddle_backend.py | 166 ++ ...utorial_cifar10_cnn_tensorflow_backend.py} | 16 +- .../tutorial_cifar10_cnn_tensorlayer.py | 181 ++ examples/basic_tutorials/tutorial_dataflow.py | 84 + .../tutorial_mnist_gan_tensorlayer.py | 154 ++ .../tutorial_mnist_mlp_dynamci_dragon.py | 100 -- .../tutorial_mnist_mlp_mindspore.py | 117 -- ...> tutorial_mnist_mlp_mindspore_backend.py} | 12 +- ...utorial_mnist_mlp_paddlepaddle_backend.py} | 19 +- ... 
tutorial_mnist_mlp_tensorflow_backend.py} | 20 +- .../basic_tutorials/tutorial_mnist_simple.py | 51 +- .../tutorial_nested_usage_of_Layer.py | 19 +- examples/model_zoo/__init__.py | 6 + examples/model_zoo/common.py | 3 +- examples/model_zoo/pretrained_resnet50.py | 2 +- examples/model_zoo/pretrained_yolov4.py | 5 +- examples/model_zoo/resnet.py | 60 +- examples/model_zoo/vgg.py | 66 +- examples/model_zoo/yolo.py | 33 +- img/tensorlayer_v.png | Bin 0 -> 27496 bytes requirements/requirements_paddle.txt | 1 + run_compile.py | 74 - setup.cfg | 4 +- setup.py | 38 +- tensorlayer/__init__.py | 1 + tensorlayer/backend/__init__.py | 2 +- tensorlayer/backend/ops/__init__.py | 11 +- tensorlayer/backend/ops/dragon_backend.py | 1049 ----------- tensorlayer/backend/ops/dragon_nn.py | 910 ---------- tensorlayer/backend/ops/load_backend.py | 15 +- tensorlayer/backend/ops/mindspore_backend.py | 94 +- tensorlayer/backend/ops/mindspore_nn.py | 625 ++++--- tensorlayer/backend/ops/paddle_backend.py | 44 +- tensorlayer/backend/ops/paddle_nn.py | 400 ++++- tensorlayer/backend/ops/tensorflow_backend.py | 20 +- tensorlayer/backend/ops/tensorflow_nn.py | 97 +- tensorlayer/cost/__init__.py | 2 - tensorlayer/cost/mindspore_cost.py | 56 +- tensorlayer/cost/paddle_cost.py | 18 +- tensorlayer/cost/tensorflow_cost.py | 23 +- tensorlayer/dataflow/__init__.py | 1 - tensorlayer/dataflow/dataflow_examples.py | 56 - tensorlayer/dataflow/image/mindspore_image.py | 1539 ----------------- tensorlayer/dataflow/image/paddle_image.py | 19 - .../dataflow/image/tensorflow_image.py | 760 -------- tensorlayer/dataflow/mindspore_data.py | 241 +-- tensorlayer/dataflow/paddle_data.py | 147 +- tensorlayer/dataflow/tensorflow_data.py | 420 +++-- tensorlayer/files/utils.py | 24 + tensorlayer/initializers/__init__.py | 4 +- .../initializers/load_initializers_backend.py | 4 +- .../initializers/mindspore_initializers.py | 258 +++ .../initializers/paddle_initializers.py | 55 +- .../initializers/tensorflow_initializers.py 
| 55 + tensorlayer/layers/__init__.py | 2 +- tensorlayer/layers/activation.py | 46 +- tensorlayer/layers/convolution/binary_conv.py | 4 +- .../layers/convolution/deformable_conv.py | 66 +- .../layers/convolution/depthwise_conv.py | 2 +- tensorlayer/layers/convolution/dorefa_conv.py | 2 +- tensorlayer/layers/convolution/group_conv.py | 3 + tensorlayer/layers/convolution/quan_conv.py | 7 +- .../layers/convolution/quan_conv_bn.py | 2 +- .../layers/convolution/separable_conv.py | 24 +- .../layers/convolution/simplified_conv.py | 59 +- .../layers/convolution/super_resolution.py | 8 +- .../layers/convolution/ternary_conv.py | 8 +- tensorlayer/layers/core/__init__.py | 2 - tensorlayer/layers/core/common.py | 122 +- tensorlayer/layers/core/core_dragon.py | 765 -------- tensorlayer/layers/core/core_mindspore.py | 202 ++- tensorlayer/layers/core/core_paddle.py | 68 +- tensorlayer/layers/core/core_tensorflow.py | 551 +++--- tensorlayer/layers/dense/base_dense.py | 5 +- tensorlayer/layers/dense/binary_dense.py | 9 +- tensorlayer/layers/dense/dorefa_dense.py | 10 +- tensorlayer/layers/dense/dropconnect.py | 14 +- tensorlayer/layers/dense/quan_dense.py | 8 + tensorlayer/layers/dense/quan_dense_bn.py | 8 +- tensorlayer/layers/deprecated.py | 25 +- tensorlayer/layers/dropout.py | 5 + tensorlayer/layers/embedding.py | 29 +- tensorlayer/layers/extend.py | 2 +- tensorlayer/layers/image_resampling.py | 24 +- tensorlayer/layers/inputs.py | 7 + tensorlayer/layers/lambda_layers.py | 20 +- tensorlayer/layers/merge.py | 16 +- tensorlayer/layers/normalization.py | 22 +- tensorlayer/layers/padding.py | 19 +- tensorlayer/layers/pooling.py | 142 +- tensorlayer/layers/quantize.py | 3 - tensorlayer/layers/recurrent.py | 1258 ++++++++++++++ tensorlayer/layers/shape.py | 5 - tensorlayer/layers/stack.py | 11 +- tensorlayer/layers/utils.py | 4 +- tensorlayer/metric/__init__.py | 2 - tensorlayer/metric/mindspore_metric.py | 1 - tensorlayer/models/__init__.py | 12 +- tensorlayer/models/core.py | 305 
++-- ...et50_weights_tf_dim_ordering_tf_kernels.h5 | Bin 24576 -> 0 bytes tensorlayer/optimizers/dragon_optimizers.py | 56 - .../optimizers/load_optimizers_backend.py | 2 - .../optimizers/mindspore_optimizers.py | 12 +- tensorlayer/package_info.py | 12 +- tensorlayer/vision/__init__.py | 3 + tensorlayer/vision/functional_cv2.py | 667 +++++++ tensorlayer/vision/functional_pil.py | 554 ++++++ .../load_vision_backend.py} | 15 +- tensorlayer/vision/mindspore_vision.py | 610 +++++++ tensorlayer/vision/paddle_vision.py | 608 +++++++ tensorlayer/vision/tensorflow_vision.py | 1396 +++++++++++++++ tensorlayer/vision/transforms.py | 1256 ++++++++++++++ tensorlayer/visualize.py | 80 +- tensorlayer_cn.md | 346 ++++ tests/layers/test_layers_pooling.py | 35 +- 140 files changed, 11289 insertions(+), 7992 deletions(-) delete mode 100644 docs/modules/app.rst create mode 100644 docs/modules/dataflow.rst create mode 100644 docs/modules/vision.rst create mode 100644 examples/basic_tutorials/tutorial_SequentialLayer.py create mode 100644 examples/basic_tutorials/tutorial_automatic_inference_input _shape.py rename examples/basic_tutorials/{tutorial_cifar10_cnn_dynamic_MS_backend.py => tutorial_cifar10_cnn_mindspore_backend.py} (74%) create mode 100644 examples/basic_tutorials/tutorial_cifar10_cnn_paddle_backend.py rename examples/basic_tutorials/{tutorial_cifar10_cnn_dynamic_TF_backend.py => tutorial_cifar10_cnn_tensorflow_backend.py} (93%) create mode 100644 examples/basic_tutorials/tutorial_cifar10_cnn_tensorlayer.py create mode 100644 examples/basic_tutorials/tutorial_dataflow.py create mode 100644 examples/basic_tutorials/tutorial_mnist_gan_tensorlayer.py delete mode 100644 examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py delete mode 100644 examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py rename examples/basic_tutorials/{tutorial_mnist_mlp_dynamic_MS_backend.py => tutorial_mnist_mlp_mindspore_backend.py} (91%) rename 
examples/basic_tutorials/{tutorial_paddle_tensorlayer_mlp.py => tutorial_mnist_mlp_paddlepaddle_backend.py} (57%) rename examples/basic_tutorials/{tutorial_mnist_mlp_dynamic_TF_backend.py => tutorial_mnist_mlp_tensorflow_backend.py} (82%) create mode 100644 img/tensorlayer_v.png create mode 100644 requirements/requirements_paddle.txt delete mode 100644 run_compile.py delete mode 100644 tensorlayer/backend/ops/dragon_backend.py delete mode 100644 tensorlayer/backend/ops/dragon_nn.py delete mode 100644 tensorlayer/dataflow/dataflow_examples.py delete mode 100644 tensorlayer/dataflow/image/mindspore_image.py delete mode 100644 tensorlayer/dataflow/image/paddle_image.py delete mode 100644 tensorlayer/dataflow/image/tensorflow_image.py create mode 100644 tensorlayer/initializers/mindspore_initializers.py delete mode 100644 tensorlayer/layers/core/core_dragon.py delete mode 100644 tensorlayer/models/models/resnet50_weights_tf_dim_ordering_tf_kernels.h5 delete mode 100644 tensorlayer/optimizers/dragon_optimizers.py create mode 100644 tensorlayer/vision/__init__.py create mode 100644 tensorlayer/vision/functional_cv2.py create mode 100644 tensorlayer/vision/functional_pil.py rename tensorlayer/{dataflow/image/__init__.py => vision/load_vision_backend.py} (66%) create mode 100644 tensorlayer/vision/mindspore_vision.py create mode 100644 tensorlayer/vision/paddle_vision.py create mode 100644 tensorlayer/vision/tensorflow_vision.py create mode 100644 tensorlayer/vision/transforms.py create mode 100644 tensorlayer_cn.md diff --git a/.codacy.yaml b/.codacy.yaml index 14735f0..21aa475 100644 --- a/.codacy.yaml +++ b/.codacy.yaml @@ -2,7 +2,7 @@ --- engines: bandit: - enabled: false # FIXME: make it work + enabled: false # FIXME: make it works exclude_paths: - scripts/* - setup.py diff --git a/CHANGELOG.md b/CHANGELOG.md index a19b722..734f392 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,7 +67,6 @@ To release a new version, please update the changelog as followed: - ## 
[Unreleased] ### Added @@ -80,8 +79,68 @@ To release a new version, please update the changelog as followed: ### Fixed -- Fix README. (#PR 1044) -- Fix package info. (#PR 1046) +### Removed + +### Security + +### Contributors + +## [2.2.3] - 2020-06-18 + +TensorLayer 2.2.3 is a maintenance release. +It contains numerous bug fixes. + +### Added + +### Changed + +### Dependencies Update + +### Deprecated + +### Fixed + +- Fix VGG. (#1078, 1079, 1089) +- Fix norm layer. (#1080) +- Fix DeCov2d layer. (#1081) +- Fix ModelLayer and LayerList doc. (#1083) +- Fix bug in SAC. (#1085) +- Fix refactoring: Deduplication. (#1086) +- Fix maxpool, batchnorm Data format fixed, vgg forward. (#1089) +- Fix package info. (#1090) + +### Removed + +### Security + +### Contributors +- @zsdonghao +- @tiancheng2000 (#1078 #1079 #1080 #1081) +- @ChrisWu1997 (#1083) +- @quantumiracle (#1085) +- @marload (#1086) +- @Gyx-One (#1089) +- @Laicheng0830 (#1090) + +## [2.2.2] - 2020-04-26 + +TensorLayer 2.2.2 is a maintenance release. + +### Added + +- Reinforcement learning(#1065) +- Mish activation(#1068) + +### Changed + +### Dependencies Update + +### Deprecated + +### Fixed + +- Fix README. +- Fix package info. ### Removed @@ -89,8 +148,36 @@ To release a new version, please update the changelog as followed: ### Contributors -- @luomai (PR #1044, 1046) +- @zsdonghao +- @quantumiracle(1065) +- @Laicheng0830(#1068) + +## [2.2.1] - 2020-01-14 + +TensorLayer 2.2.1 is a maintenance release. +It contains numerous bug fixes. + +### Added + +### Changed + +### Dependencies Update + +### Deprecated + +### Fixed + +- Fix README. (#1044) +- Fix package info. (#1046) +- Fix build test (Using YAPF 0.29) (#1057) + +### Removed + +### Security + +### Contributors +- @luomai (#1044, #1046, #1057) ## [2.2.0] - 2019-09-13 @@ -150,7 +237,7 @@ This release is compatible with TensorFlow 2 RC1. - Replace tf.nn.func with tf.nn.func.\_\_name\_\_ in model config. (PR #994) - Add Reinforcement learning tutorials. 
(PR #995) - Add RNN layers with simple rnn cell, GRU cell, LSTM cell. (PR #998) -- Update Seq2seq (#998) +- Update Seq2seq (#998) - Add Seq2seqLuongAttention model (#998) ### Fixed @@ -571,12 +658,15 @@ To many PR for this update, please check [here](https://github.com/tensorlayer/t @zsdonghao @luomai @DEKHTIARJonathan [Unreleased]: https://github.com/tensorlayer/tensorlayer/compare/2.0....master -[2.2.0]: https://github.com/tensorlayer/tensorlayer/compare/2.2.0...2.2.0 -[2.1.0]: https://github.com/tensorlayer/tensorlayer/compare/2.1.0...2.1.0 -[2.0.2]: https://github.com/tensorlayer/tensorlayer/compare/2.0.2...2.0.2 -[2.0.1]: https://github.com/tensorlayer/tensorlayer/compare/2.0.1...2.0.1 -[2.0.0]: https://github.com/tensorlayer/tensorlayer/compare/2.0.0...2.0.0 -[1.11.1]: https://github.com/tensorlayer/tensorlayer/compare/1.11.0...1.11.0 +[2.2.3]: https://github.com/tensorlayer/tensorlayer/compare/2.2.2...2.2.3 +[2.2.2]: https://github.com/tensorlayer/tensorlayer/compare/2.2.1...2.2.2 +[2.2.1]: https://github.com/tensorlayer/tensorlayer/compare/2.2.0...2.2.1 +[2.2.0]: https://github.com/tensorlayer/tensorlayer/compare/2.1.0...2.2.0 +[2.1.0]: https://github.com/tensorlayer/tensorlayer/compare/2.0.2...2.1.0 +[2.0.2]: https://github.com/tensorlayer/tensorlayer/compare/2.0.1...2.0.2 +[2.0.1]: https://github.com/tensorlayer/tensorlayer/compare/2.0.0...2.0.1 +[2.0.0]: https://github.com/tensorlayer/tensorlayer/compare/1.11.1...2.0.0 +[1.11.1]: https://github.com/tensorlayer/tensorlayer/compare/1.11.0...1.11.1 [1.11.0]: https://github.com/tensorlayer/tensorlayer/compare/1.10.1...1.11.0 [1.10.1]: https://github.com/tensorlayer/tensorlayer/compare/1.10.0...1.10.1 [1.10.0]: https://github.com/tensorlayer/tensorlayer/compare/1.9.1...1.10.0 diff --git a/LICENSE.rst b/LICENSE.rst index b662f1d..b195ea1 100644 --- a/LICENSE.rst +++ b/LICENSE.rst @@ -1,7 +1,7 @@ License ======= -Copyright (c) 2016~2018 The TensorLayer contributors. All rights reserved. 
+Copyright (c) 2016~2020 The TensorLayer contributors. All rights reserved. Apache License Version 2.0, January 2004 @@ -208,4 +208,4 @@ Copyright (c) 2016~2018 The TensorLayer contributors. All rights reserved. Contact ======= -Questions? Please contact hao.dong11@imperial.ac.uk +Questions? Please contact hao.dong@pku.edu.cn diff --git a/Makefile b/Makefile index 4fbfd85..9ce4e51 100644 --- a/Makefile +++ b/Makefile @@ -14,16 +14,17 @@ test: python3 tests/files/test_utils_saveload.py format: - autoflake -i examples/*.py - autoflake -i tensorlayer/*.py - autoflake -i tensorlayer/**/*.py + autoflake -ir examples + autoflake -ir tensorlayer + autoflake -ir tests isort -rc examples isort -rc tensorlayer + isort -rc tests - yapf -i examples/*.py - yapf -i tensorlayer/*.py - yapf -i tensorlayer/**/*.py + yapf -ir examples + yapf -ir tensorlayer + yapf -ir tests install3: pip3 install -U . --user diff --git a/README.md b/README.md index 6d26423..05d28aa 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ - +
@@ -7,26 +7,28 @@ -![GitHub last commit (branch)](https://img.shields.io/github/last-commit/tensorlayer/tensorlayer/master.svg) +[![GitHub last commit (branch)](https://img.shields.io/github/last-commit/tensorlayer/tensorlayer/master.svg)](https://git.openi.org.cn/TensorLayer/tensorlayer3.0) [![Supported TF Version](https://img.shields.io/badge/TensorFlow-2.0.0%2B-brightgreen.svg)](https://github.com/tensorflow/tensorflow/releases) -[![Documentation Status](https://readthedocs.org/projects/tensorlayer/badge/)](https://tensorlayer.readthedocs.io/) -[![Build Status](https://travis-ci.org/tensorlayer/tensorlayer.svg?branch=master)](https://travis-ci.org/tensorlayer/tensorlayer) -[![Downloads](http://pepy.tech/badge/tensorlayer)](http://pepy.tech/project/tensorlayer) -[![Downloads](https://pepy.tech/badge/tensorlayer/week)](https://pepy.tech/project/tensorlayer/week) -[![Docker Pulls](https://img.shields.io/docker/pulls/tensorlayer/tensorlayer.svg)](https://hub.docker.com/r/tensorlayer/tensorlayer/) -[![Codacy Badge](https://api.codacy.com/project/badge/Grade/d6b118784e25435498e7310745adb848)](https://www.codacy.com/app/tensorlayer/tensorlayer) +[![Documentation Status](https://readthedocs.org/projects/tensorlayer/badge/)](https://tensorlayer3.readthedocs.io) +![Build Status](https://travis-ci.org/tensorlayer/tensorlayer.svg?branch=master) +![Downloads](http://pepy.tech/badge/tensorlayer) +![Downloads](https://pepy.tech/badge/tensorlayer/week) +![Docker Pulls](https://img.shields.io/docker/pulls/tensorlayer/tensorlayer.svg) +![Codacy Badge](https://api.codacy.com/project/badge/Grade/d6b118784e25435498e7310745adb848) + +[中文简介](https://git.openi.org.cn/TensorLayer/tensorlayer3.0/src/branch/master/tensorlayer_cn.md) -[TensorLayer](https://tensorlayer.readthedocs.io) is a novel TensorFlow-based deep learning and reinforcement learning library designed for researchers and engineers. 
It provides an extensive collection of customizable neural layers to build advanced AI models quickly, based on this, the community open-sourced mass [tutorials](https://github.com/tensorlayer/tensorlayer/blob/master/examples/reinforcement_learning/README.md) and [applications](https://github.com/tensorlayer). TensorLayer is awarded the 2017 Best Open Source Software by the [ACM Multimedia Society](https://twitter.com/ImperialDSI/status/923928895325442049). +[TensorLayer](https://tensorlayer3.readthedocs.io) is a novel supports multiple backends deep learning and reinforcement learning library designed for researchers and engineers. It provides an extensive collection of customizable neural layers to build advanced AI models quickly, based on this, the community open-sourced mass [tutorials](https://git.openi.org.cn/TensorLayer/tensorlayer3.0/src/branch/master/examples/basic_tutorials) and [applications](https://git.openi.org.cn/TensorLayer/tensorlayer3.0/src/branch/master/examples/model_zoo). TensorLayer is awarded the 2017 Best Open Source Software by the [ACM Multimedia Society](https://twitter.com/ImperialDSI/status/923928895325442049). This project can also be found at [iHub](https://code.ihub.org.cn/projects/328) and [Gitee](https://gitee.com/organizations/TensorLayer). # News -🔥 **3.0.0 will supports multiple backends, such as TensorFlow, MindSpore, PaddlePaddle and more, allowing users to run the code on different hardware like Nvidia-GPU and Huawei-Ascend. We need more people to join the dev team, if you are interested, please email hao.dong@pku.edu.cn** +🔥 **3.0.0 has been pre-released, it supports TensorFlow and MindSpore backends, and supports some PaddlePaddle operator backends, allowing users to run the code on different hardware like Nvidia-GPU and Huawei-Ascend. It will support TensorFlow, MindSpore, PaddlePaddle, and PyTorch backends in the future. Feel free to use it and make suggestions. 
We need more people to join the dev team, if you are interested, please email hao.dong@pku.edu.cn** 🔥 Reinforcement Learning Zoo: [Low-level APIs](https://github.com/tensorlayer/tensorlayer/tree/master/examples/reinforcement_learning) for professional usage, [High-level APIs](https://github.com/tensorlayer/RLzoo) for simple usage, and a corresponding [Springer textbook](http://springer.com/gp/book/9789811540943) @@ -38,7 +40,7 @@ This project can also be found at [iHub](https://code.ihub.org.cn/projects/328) TensorLayer is a new deep learning library designed with simplicity, flexibility and high-performance in mind. -- ***Simplicity*** : TensorLayer has a high-level layer/model abstraction which is effortless to learn. You can learn how deep learning can benefit your AI tasks in minutes through the massive [examples](https://github.com/tensorlayer/awesome-tensorlayer). +- ***Simplicity*** : TensorLayer has a high-level layer/model abstraction which is effortless to learn. You can learn how deep learning can benefit your AI tasks in minutes through the massive [examples](https://git.openi.org.cn/TensorLayer/tensorlayer3.0/src/branch/master/examples). - ***Flexibility*** : TensorLayer APIs are transparent and flexible, inspired by the emerging PyTorch library. Compared to the Keras abstraction, TensorLayer makes it much easier to build and train complex AI models. - ***Zero-cost Abstraction*** : Though simple to use, TensorLayer does not require you to make any compromise in the performance of TensorFlow (Check the following benchmark section for more details). @@ -53,12 +55,12 @@ Imperial College London, UC Berkeley, Carnegie Mellon University, Stanford Unive TensorLayer has extensive documentation for both beginners and professionals. The documentation is available in both English and Chinese. 
-[![English Documentation](https://img.shields.io/badge/documentation-english-blue.svg)](https://tensorlayer.readthedocs.io/) +[![English Documentation](https://img.shields.io/badge/documentation-english-blue.svg)](https://tensorlayer3.readthedocs.io/) [![Chinese Documentation](https://img.shields.io/badge/documentation-%E4%B8%AD%E6%96%87-blue.svg)](https://tensorlayercn.readthedocs.io/) [![Chinese Book](https://img.shields.io/badge/book-%E4%B8%AD%E6%96%87-blue.svg)](http://www.broadview.com.cn/book/5059/) If you want to try the experimental features on the the master branch, you can find the latest document -[here](https://tensorlayer.readthedocs.io/en/latest/). +[here](https://tensorlayer3.readthedocs.io/en/latest/). # Extensive Examples @@ -71,8 +73,15 @@ You can find a large collection of examples that use TensorLayer in [here](examp
# Getting Start +Comparison of TensorLayer version + + +
+ +
+
-TensorLayer 2.0 relies on TensorFlow, numpy, and others. To use GPUs, CUDA and cuDNN are required. +TensorLayer 3.0 relies on TensorFlow, numpy, and others. To use GPUs, CUDA and cuDNN are required. Install TensorFlow: @@ -81,16 +90,21 @@ pip3 install tensorflow-gpu==2.0.0-rc1 # TensorFlow GPU (version 2.0 RC1) pip3 install tensorflow # CPU version ``` -Install the stable release of TensorLayer: +Install the stable release of TensorLayer3: ```bash -pip3 install tensorlayer +pip3 install tensorlayer3 ``` -Install the unstable development version of TensorLayer: +Install the stable release of TensorLayer2.x: + +```bast +pip3 install tensorlayer +``` +Install the unstable development version of TensorLayer3: ```bash -pip3 install git+https://github.com/tensorlayer/tensorlayer.git +pip3 install git+https://git.openi.org.cn/TensorLayer/tensorlayer3.0.git ``` If you want to install the additional dependencies, you can also run @@ -99,6 +113,15 @@ pip3 install --upgrade tensorlayer[all] # all additional dependenci pip3 install --upgrade tensorlayer[extra] # only the `extra` dependencies pip3 install --upgrade tensorlayer[contrib_loggers] # only the `contrib_loggers` dependencies ``` +If you want to use mindspore backend, you should install mindspore>=1.2.1 +```bash +pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.2.1/MindSpore/gpu/ubuntu_x86/cuda-10.1/mindspore_gpu-1.2.1-cp37-cp37m-linux_x86_64.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple +``` + +If you want to use paddlepaddle backend, you should install paddlepaddle>=2.1.1 +```bash +python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` If you are TensorFlow 1.X users, you can use TensorLayer 1.11.0: @@ -150,6 +173,7 @@ The following table shows the training speeds of [VGG16](http://www.robots.ox.ac | Graph | Keras | channel last | 8677 | 2580 | 2576 | 101 | | Eager | TensorFlow 2.0 | channel last | 8723 | 2052 | 
2024 | 97 | | | TensorLayer 2.0 | channel last | 8723 | 2010 | 2007 | 95 | +| | TensorLayer 3.0 | channel last | | | | | # Getting Involved @@ -169,7 +193,7 @@ We suggest users to report bugs using Github issues. Users can also discuss how # Citing TensorLayer -If you find TensorLayer useful for your project, please cite the following paper: +If you find TensorLayer useful for your project, please cite the following papers: ``` @article{tensorlayer2017, @@ -179,4 +203,13 @@ If you find TensorLayer useful for your project, please cite the following paper url = {http://tensorlayer.org}, year = {2017} } + +@inproceedings{tensorlayer2021, + title={Tensorlayer 3.0: A Deep Learning Library Compatible With Multiple Backends}, + author={Lai, Cheng and Han, Jiarong and Dong, Hao}, + booktitle={2021 IEEE International Conference on Multimedia \& Expo Workshops (ICMEW)}, + pages={1--3}, + year={2021}, + organization={IEEE} +} ``` diff --git a/README.rst b/README.rst index 5f424cf..f7971cd 100644 --- a/README.rst +++ b/README.rst @@ -10,12 +10,15 @@ |JOIN-SLACK-LOGO| -TensorLayer is a novel TensorFlow-based deep learning and reinforcement -learning library designed for researchers and engineers. It provides a -large collection of customizable neural layers / functions that are key -to build real-world AI applications. TensorLayer is awarded the 2017 -Best Open Source Software by the `ACM Multimedia -Society `__. +`TensorLayer3 `__ is a novel supports +multiple backends deep learning and reinforcement learning library designed +for researchers and engineers. +It provides an extensive collection of customizable neural layers to +build advanced AI models quickly, based on this, the community open-sourced +mass `tutorials `__ and +`applications `__. +TensorLayer is awarded the 2017 Best Open Source Software by the `ACM Multimedia Society `__. +This project can also be found at `OpenI `__ and `Gitee `__. 
Why another deep learning library: TensorLayer ============================================== @@ -73,15 +76,15 @@ The simplest way to install TensorLayer is to use the Python Package Index (PyPI .. code:: bash # for last stable version - pip install --upgrade tensorlayer + pip install --upgrade tensorlayer3 # for latest release candidate - pip install --upgrade --pre tensorlayer + pip install --upgrade --pre tensorlayer3 # if you want to install the additional dependencies, you can also run - pip install --upgrade tensorlayer[all] # all additional dependencies - pip install --upgrade tensorlayer[extra] # only the `extra` dependencies - pip install --upgrade tensorlayer[contrib_loggers] # only the `contrib_loggers` dependencies + pip install --upgrade tensorlayer3[all] # all additional dependencies + pip install --upgrade tensorlayer3[extra] # only the `extra` dependencies + pip install --upgrade tensorlayer3[contrib_loggers] # only the `contrib_loggers` dependencies Alternatively, you can install the latest or development version by directly pulling from github: @@ -139,7 +142,7 @@ Cite ==== If you find this project useful, we would be grateful if you cite the -TensorLayer paper: +TensorLayer papers. :: @@ -151,6 +154,17 @@ TensorLayer paper: year = {2017} } +:: + + @inproceedings{tensorlayer2021, + title={Tensorlayer 3.0: A Deep Learning Library Compatible With Multiple Backends}, + author={Lai, Cheng and Han, Jiarong and Dong, Hao}, + booktitle={2021 IEEE International Conference on Multimedia \& Expo Workshops (ICMEW)}, + pages={1--3}, + year={2021}, + organization={IEEE} + } + License ======= diff --git a/docs/index.rst b/docs/index.rst index b4b1fd2..27cac3d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -57,12 +57,14 @@ method, this part of the documentation is for you. 
modules/activation modules/array_ops modules/cost + modules/dataflow modules/prepro modules/files modules/iterate modules/layers modules/models modules/nlp + modules/vision modules/initializers modules/rein modules/utils diff --git a/docs/modules/activation.rst b/docs/modules/activation.rst index 79bad96..be250d8 100644 --- a/docs/modules/activation.rst +++ b/docs/modules/activation.rst @@ -2,9 +2,7 @@ API - Activations ========================= To make TensorLayer simple, we minimize the number of activation functions as much as -we can. So we encourage you to use TensorFlow's function. TensorFlow provides -``tf.nn.relu``, ``tf.nn.relu6``, ``tf.nn.elu``, ``tf.nn.softplus``, -``tf.nn.softsign`` and so on. +we can. So we encourage you to use Customizes activation function. For parametric activation, please read the layer APIs. The shortcut of ``tensorlayer.activation`` is ``tensorlayer.act``. @@ -14,64 +12,71 @@ Your activation Customizes activation function in TensorLayer is very easy. The following example implements an activation that multiplies its input by 2. -For more complex activation, TensorFlow API will be required. +For more complex activation, TensorFlow(MindSpore/PaddlePaddle) API will be required. .. code-block:: python - def double_activation(x): - return x * 2 - - double_activation = lambda x: x * 2 + class DoubleActivation(object): + def __init__(self): + pass + def __call__(self, x): + return x * 2 + double_activation = DoubleActivation() -.. automodule:: tensorlayer.activation +.. automodule:: tensorlayer.layers.activation .. autosummary:: - leaky_relu - leaky_relu6 - leaky_twice_relu6 - ramp - swish - sign - hard_tanh - pixel_wise_softmax - mish - -Ramp + PRelu + PRelu6 + PTRelu6 + LeakyReLU + LeakyReLU6 + LeakyTwiceRelu6 + Ramp + Swish + HardTanh + Mish + +PRelu ------ -.. autofunction:: ramp +.. autofunction:: PRelu -Leaky ReLU +PRelu6 ------------ -.. autofunction:: leaky_relu +.. autofunction:: PRelu6 -Leaky ReLU6 +PTRelu6 ------------ -.. 
autofunction:: leaky_relu6 +.. autofunction:: PTRelu6 -Twice Leaky ReLU6 +LeakyReLU ----------------- -.. autofunction:: leaky_twice_relu6 +.. autofunction:: LeakyReLU -Swish +LeakyReLU6 ------------ -.. autofunction:: swish +.. autofunction:: LeakyReLU6 -Sign +LeakyTwiceRelu6 --------------------- -.. autofunction:: sign +.. autofunction:: LeakyTwiceRelu6 -Hard Tanh +Ramp --------------------- -.. autofunction:: hard_tanh +.. autofunction:: Ramp -Pixel-wise softmax +Swish -------------------- -.. autofunction:: pixel_wise_softmax +.. autofunction:: Swish + +HardTanh +---------------- +.. autofunction:: HardTanh -mish +Mish --------- -.. autofunction:: mish +.. autofunction:: Mish Parametric activation ------------------------------ diff --git a/docs/modules/app.rst b/docs/modules/app.rst deleted file mode 100644 index d636292..0000000 --- a/docs/modules/app.rst +++ /dev/null @@ -1,10 +0,0 @@ -API - Application Library -========================= - -Application library is an open source Deep learning applications based on TensorLayer. - -Supported Application: -------------------------- - - - diff --git a/docs/modules/cost.rst b/docs/modules/cost.rst index eba52f4..6277b9d 100644 --- a/docs/modules/cost.rst +++ b/docs/modules/cost.rst @@ -11,7 +11,7 @@ we can. So we encourage you to use TensorFlow's function, , see `TensorFlow API .. autosummary:: - cross_entropy + softmax_cross_entropy_with_logits sigmoid_cross_entropy binary_cross_entropy mean_squared_error @@ -28,12 +28,11 @@ we can. So we encourage you to use TensorFlow's function, , see `TensorFlow API maxnorm_regularizer maxnorm_o_regularizer maxnorm_i_regularizer - huber_loss Softmax cross entropy ---------------------- -.. autofunction:: cross_entropy +.. autofunction:: softmax_cross_entropy_with_logits Sigmoid cross entropy ---------------------- @@ -94,7 +93,3 @@ Special .. autofunction:: lo_regularizer .. autofunction:: maxnorm_o_regularizer .. 
autofunction:: maxnorm_i_regularizer - -Huber Loss -^^^^^^^^^^ -.. autofunction:: huber_loss \ No newline at end of file diff --git a/docs/modules/dataflow.rst b/docs/modules/dataflow.rst new file mode 100644 index 0000000..5ffcc56 --- /dev/null +++ b/docs/modules/dataflow.rst @@ -0,0 +1,79 @@ +API - Dataflow +============ + +.. automodule:: tensorlayer.dataflow + +.. ----------------------------------------------------------- +.. Dataflow List +.. ----------------------------------------------------------- + +Dataflow list +---------------------- + +.. autosummary:: + + Dataset + IterableDataset + FromGenerator + FromSlices + Dataloader + + Concat + Zip + Batch + Map + Repeat + Shuffle + +.. ----------------------------------------------------------- +.. Dataflow +.. ----------------------------------------------------------- + +Dataflow +----------------- + +Dataset +^^^^^^^^^^^^^^^^ +.. autoclass:: Dataset + + +IterableDataset +^^^^^^^^^^^^^^^^ +.. autoclass:: IterableDataset + +FromGenerator +^^^^^^^^^^^^^^^^ +.. autoclass:: FromGenerator + +FromSlices +^^^^^^^^^^^^^^^^ +.. autoclass:: FromSlices + +Dataloader +^^^^^^^^^^^^^^^^ +.. autoclass:: Dataloader + +Concat +^^^^^^^^^^^^^^^^ +.. autoclass:: Concat + +Zip +^^^^^^^^^^^^^^^^ +.. autoclass:: Zip + +Batch +^^^^^^^^^^^^^^^^ +.. autoclass:: Batch + +Map +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: Map + +Repeat +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: Repeat + +Shuffle +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: Shuffle + diff --git a/docs/modules/initializers.rst b/docs/modules/initializers.rst index 6311619..3bf4213 100644 --- a/docs/modules/initializers.rst +++ b/docs/modules/initializers.rst @@ -16,6 +16,7 @@ e.g. ``tf.initializers.he_normal``, please refer to TensorFlow provided initiali RandomUniform RandomNormal TruncatedNormal + HeNormal deconv2d_bilinear_upsampling_initializer Initializer @@ -46,6 +47,10 @@ TruncatedNormal --------------------- .. autoclass:: TruncatedNormal +HeNormal +------------ +.. 
autoclass:: HeNormal + deconv2d_bilinear_upsampling_initializer ------------------------------------------ .. autofunction:: deconv2d_bilinear_upsampling_initializer diff --git a/docs/modules/layers.rst b/docs/modules/layers.rst index 78e0eee..8f08aef 100644 --- a/docs/modules/layers.rst +++ b/docs/modules/layers.rst @@ -12,10 +12,9 @@ Layer list .. autosummary:: - Layer + Module - ModelLayer - LayerList + SequentialLayer Input @@ -73,14 +72,6 @@ Layer list BatchNorm1d BatchNorm2d BatchNorm3d - LocalResponseNorm - InstanceNorm - InstanceNorm1d - InstanceNorm2d - InstanceNorm3d - LayerNorm - GroupNorm - SwitchNorm RNN SimpleRNN @@ -134,17 +125,13 @@ Layer list Base Layer ----------- -Base Layer -^^^^^^^^^^^^^^^^ -.. autoclass:: Layer - -Model Layer +Module ^^^^^^^^^^^^^^^^ -.. autoclass:: ModelLayer +.. autoclass:: Module -Layer List +Sequential Layer ^^^^^^^^^^^^^^^^ -.. autoclass:: LayerList +.. autoclass:: SequentialLayer .. ----------------------------------------------------------- .. Input Layer @@ -399,38 +386,6 @@ Batch Normalization 3D ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: BatchNorm3d -Local Response Normalization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: LocalResponseNorm - -Instance Normalization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: InstanceNorm - -Instance Normalization 1D -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: InstanceNorm1d - -Instance Normalization 2D -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: InstanceNorm2d - -Instance Normalization 3D -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: InstanceNorm3d - -Layer Normalization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: LayerNorm - -Group Normalization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: GroupNorm - -Switch Normalization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: SwitchNorm - .. ----------------------------------------------------------- .. Padding Layers .. 
----------------------------------------------------------- diff --git a/docs/modules/models.rst b/docs/modules/models.rst index 272f1d9..821d30c 100644 --- a/docs/modules/models.rst +++ b/docs/modules/models.rst @@ -1,59 +1,34 @@ -API - Models +API - Pretrained Models ================================ TensorLayer provides many pretrained models, you can easily use the whole or a part of the pretrained models via these APIs. -.. automodule:: tensorlayer.models +.. automodule:: examples.model_zoo .. autosummary:: - Model - - VGG16 - VGG19 - SqueezeNetV1 - MobileNetV1 + vgg16 + vgg19 + YOLOv4 ResNet50 - Seq2seq - Seq2seqLuongAttention - - -Base Model ------------ -.. autoclass:: Model - -VGG16 +vgg16 ---------------------- -.. autofunction:: VGG16 +.. autofunction:: vgg16 -VGG19 +vgg19 ---------------------- -.. autofunction:: VGG19 - -SqueezeNetV1 ----------------- -.. autofunction:: SqueezeNetV1 +.. autofunction:: vgg19 -MobileNetV1 +YOLOv4 ---------------- -.. autofunction:: MobileNetV1 +.. autofunction:: YOLOv4 ResNet50 ---------------- -.. autofunction:: ResNet50 - -Seq2seq ------------------------- - -.. autoclass:: Seq2seq - - -Seq2seq Luong Attention ------------------------- +.. autofuncion:: ResNet50 -.. autoclass:: Seq2seqLuongAttention diff --git a/docs/modules/optimizers.rst b/docs/modules/optimizers.rst index 0ababc8..9f272d3 100644 --- a/docs/modules/optimizers.rst +++ b/docs/modules/optimizers.rst @@ -5,6 +5,8 @@ API - Optimizers TensorLayer provides simple API and tools to ease research, development and reduce the time to production. Therefore, we provide the latest state of the art optimizers that work with Tensorflow. +The optimizers functions provided by TensorFlow can be used in TensorLayer. +We have also wrapped the optimizers functions for each framework, which can be found in tensorlayer.optimizers. Optimizers List --------------- @@ -12,6 +14,17 @@ Optimizers List .. 
autosummary:: AMSGrad + Adadelta + Adagrad + Adam + Adamax + Ftrl + Nadam + RMSprop + SGD + Momentum + Lamb + LARS AMSGrad Optimizer ----------------- diff --git a/docs/modules/vision.rst b/docs/modules/vision.rst new file mode 100644 index 0000000..70718bf --- /dev/null +++ b/docs/modules/vision.rst @@ -0,0 +1,204 @@ +API - Vision +============ + +.. automodule:: tensorlayer.vision.transforms + +.. ----------------------------------------------------------- +.. Vision Transforms List +.. ----------------------------------------------------------- + +Vision Transforms list +---------------------- + +.. autosummary:: + + ToTensor + Compose + + Crop + CentralCrop + RandomCrop + Pad + PadToBoundingbox + Resize + RandomResizedCrop + + RgbToGray + HsvToRgb + RgbToHsv + + AdjustBrightness + AdjustContrast + AdjustHue + AdjustSaturation + RandomBrightness + RandomContrast + RandomHue + RandomSaturation + ColorJitter + + FlipHorizontal + FlipVertical + RandomFlipHorizontal + RandomFlipVertical + + RandomRotation + RandomShift + RandomShear + RandomZoom + RandomAffine + + Transpose + HWC2CHW + CHW2HWC + + Normalize + StandardizePerImage + +.. ----------------------------------------------------------- +.. Vision Transforms +.. ----------------------------------------------------------- + +Vision Transforms +----------------- + +ToTensor +^^^^^^^^^^^^^^^^ +.. autoclass:: ToTensor + + +Compose +^^^^^^^^^^^^^^^^ +.. autoclass:: Compose + +Crop +^^^^^^^^^^^^^^^^ +.. autoclass:: Crop + +CentralCrop +^^^^^^^^^^^^^^^^ +.. autoclass:: CentralCrop + +RandomCrop +^^^^^^^^^^^^^^^^ +.. autoclass:: RandomCrop + +Pad +^^^^^^^^^^^^^^^^ +.. autoclass:: Pad + +PadToBoundingbox +^^^^^^^^^^^^^^^^ +.. autoclass:: PadToBoundingbox + +Resize +^^^^^^^^^^^^^^^^ +.. autoclass:: Resize + +RandomResizedCrop +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomResizedCrop + +RgbToGray +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RgbToGray + +HsvToRgb +^^^^^^^^^^^^^^^^^^^^^ +.. 
autoclass:: HsvToRgb + +RgbToHsv +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RgbToHsv + +AdjustBrightness +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: AdjustBrightness + +AdjustContrast +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: AdjustContrast + +AdjustHue +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: AdjustHue + +AdjustSaturation +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: AdjustSaturation + +RandomBrightness +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomBrightness + +RandomContrast +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomContrast + +RandomHue +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomHue + +RandomSaturation +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomSaturation + +ColorJitter +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: ColorJitter + +FlipHorizontal +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: FlipHorizontal + +FlipVertical +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: FlipVertical + +RandomFlipHorizontal +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomFlipHorizontal + +RandomFlipVertical +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomFlipVertical + +RandomRotation +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomRotation + +RandomShift +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomShift + +RandomShear +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomShear + +RandomZoom +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomZoom + +RandomAffine +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: RandomAffine + +Transpose +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: Transpose + +HWC2CHW +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: HWC2CHW + +CHW2HWC +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: CHW2HWC + +Normalize +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: Normalize + +StandardizePerImage +^^^^^^^^^^^^^^^^^^^^^ +.. 
autoclass:: StandardizePerImage \ No newline at end of file diff --git a/docs/user/contributing.rst b/docs/user/contributing.rst index 9b1d98f..64c8354 100644 --- a/docs/user/contributing.rst +++ b/docs/user/contributing.rst @@ -4,8 +4,8 @@ Contributing =============== -TensorLayer 2.0 is a major ongoing research project in CFCS, Peking University, the first version was established at Imperial College London in 2016. The goal of the project is to develop a compositional language while complex learning systems -can be built through composition of neural network modules. +TensorLayer 3.0 is a major ongoing research project in Peking University and Pengcheng Laboratory, the first version was established at Imperial College London in 2016. The goal of the project is to develop a compositional languagea that is compatible with multiple deep learning frameworks, +while complex learning systems can be built through composition of neural network modules. Numerous contributors come from various horizons such as: Imperial College London, Tsinghua University, Carnegie Mellon University, Stanford, University of Technology of Compiegne, Google, Microsoft, Bloomberg and etc. @@ -25,6 +25,12 @@ Project Maintainers The TensorLayer project was started by `Hao Dong `_ at Imperial College London in June 2016. 
+For TensorLayer 3.x, it is now actively developed and maintained by
Classification task, see `tutorial_mnist_mlp_mindspore_backend.py `__. + - Multi-layer perceptron (MNIST), mix of tensorlayer and paddlepaddle. Classification task, see `tutorial_mnist_mlp_paddlepaddle_backend.py `__. + + - Convolutional Network (CIFAR-10). mix of tensorlayer and tensorflow. Classification task, see `tutorial_cifar10_cnn_tensorflow_backend.py `_. + - Convolutional Network (CIFAR-10). mix of tensorlayer and mindspore. Classification task, see `tutorial_cifar10_cnn_mindspore_backend.py `_. + - TensorFlow dataset API for object detection see `here `__. - Data augmentation with TFRecord. Effective way to load and pre-process data, see `tutorial_tfrecord*.py `__ and `tutorial_cifar10_tfrecord.py `__. - Data augmentation with TensorLayer. See `tutorial_fast_affine_transform.py `__ (for quick test only). @@ -20,15 +35,16 @@ Basics Pretrained Models ================== - - VGG 16 (ImageNet). Classification task, see `tutorial_models_vgg16 `__. + - VGG 16 (ImageNet). Classification task, see `pretrained_vgg16 `__. - VGG 19 (ImageNet). Classification task, see `tutorial_models_vgg19.py `__. - - SqueezeNet (ImageNet). Model compression, see `tutorial_models_squeezenetv1.py `__. - - MobileNet (ImageNet). Model compression, see `tutorial_models_mobilenetv1.py `__. + - YOLOv4 (MS-COCO). Object Detection, see `pretrained_yolov4.py `__. + - SqueezeNet (ImageNet, Based on TensroLayer2.0). Model compression, see `tutorial_models_squeezenetv1.py `__. + - MobileNet (ImageNet, Based on TensroLayer2.0). Model compression, see `tutorial_models_mobilenetv1.py `__. - All pretrained models in `pretrained-models `__. Vision ================== - +Warning:These examples below only support Tensorlayer 2.0. Tensorlayer 3.0 is under development. - Arbitrary Style Transfer in Real-time with Adaptive Instance Normalization, see `examples `__. - ArcFace: Additive Angular Margin Loss for Deep Face Recognition, see `InsignFace `__. - BinaryNet. 
Model compression, see `mnist `__ `cifar10 `__. @@ -44,6 +60,7 @@ Vision Adversarial Learning ======================== +Warning:These examples below only support Tensorlayer 2.0. Tensorlayer 3.0 is under development. - DCGAN (CelebA). Generating images by `Deep Convolutional Generative Adversarial Networks `__ by `zsdonghao `__. - `Generative Adversarial Text to Image Synthesis `__ by `zsdonghao `__. - `Unsupervised Image to Image Translation with Generative Adversarial Networks `__ by `zsdonghao `__. @@ -54,7 +71,7 @@ Adversarial Learning Natural Language Processing ============================== - +Warning:These examples below only support Tensorlayer 2.0. Tensorlayer 3.0 is under development. - Recurrent Neural Network (LSTM). Apply multiple LSTM to PTB dataset for language modeling, see `tutorial_ptb_lstm_state_is_tuple.py `__. - Word Embedding (Word2vec). Train a word embedding matrix, see `tutorial_word2vec_basic.py `__. - Restore Embedding matrix. Restore a pre-train embedding matrix, see `tutorial_generate_text.py `__. @@ -65,7 +82,7 @@ Natural Language Processing Reinforcement Learning ============================== - +Warning:These examples below only support Tensorlayer 2.0. Tensorlayer 3.0 is under development. - Policy Gradient / Network (Atari Ping Pong), see `tutorial_atari_pong.py `__. - Deep Q-Network (Frozen lake), see `tutorial_frozenlake_dqn.py `__. - Q-Table learning algorithm (Frozen lake), see `tutorial_frozenlake_q_table.py `__. @@ -77,6 +94,7 @@ Reinforcement Learning Miscellaneous ================= +Warning:These examples below only support Tensorlayer 2.0. Tensorlayer 3.0 is under development. 
- `Sipeed `__ : Run TensorLayer on AI Chips diff --git a/docs/user/get_start_advance.rst b/docs/user/get_start_advance.rst index db3441c..1dae18a 100644 --- a/docs/user/get_start_advance.rst +++ b/docs/user/get_start_advance.rst @@ -11,11 +11,13 @@ Customizing layer Layers with weights ---------------------- -The fully-connected layer is `a = f(x*W+b)`, the most simple implementation is as follow, which can only support static model. +The fully-connected layer is `a = f(x*W+b)`, the most simple implementation is as follow. .. code-block:: python - class Dense(Layer): + from tensorlayer.layers import Module + + class Dense(Module): """The :class:`Dense` class is a fully connected layer. Parameters @@ -33,12 +35,16 @@ The fully-connected layer is `a = f(x*W+b)`, the most simple implementation is a n_units, # the number of units/channels of this layer act=None, # None: no activation, tf.nn.relu or 'relu': ReLU ... name=None, # the name of this layer (optional) + in_channels = None ): super(Dense, self).__init__(name, act=act) # auto naming, dense_1, dense_2 ... self.n_units = n_units + self.in_channels = in_channels + self.build() + self._built = True - def build(self, inputs_shape): # initialize the model weights here - shape = [inputs_shape[1], self.n_units] + def build(self): # initialize the model weights here + shape = [self.in_channels, self.n_units] self.W = self._get_weights("weights", shape=tuple(shape), init=self.W_init) self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init) @@ -48,13 +54,14 @@ The fully-connected layer is `a = f(x*W+b)`, the most simple implementation is a z = self.act(z) return z -The full implementation is as follow, which supports both static and dynamic models and allows users to control whether to use the bias, how to initialize the weight values. 
+The full implementation is as follows. It supports both automatic input shape inference and dynamic models, and allows users to control whether to use the bias and how to initialize the weight values.
- super(Dense, self).__init__(name, act=act) + + super(Dense, self).__init__(name, act=act) self.n_units = n_units self.W_init = W_init self.b_init = b_init self.in_channels = in_channels - # in dynamic model, the number of input channel is given, we initialize the weights here - if self.in_channels is not None: + if self.in_channels is not None: self.build(self.in_channels) self._built = True logging.info( "Dense %s: %d %s" % - (self.name, self.n_units, self.act.__name__ if self.act is not None else 'No Activation') + (self.name, self.n_units, self.act.__class__.__name__ if self.act is not None else 'No Activation') ) - def __repr__(self): # optional, for printing information - actstr = self.act.__name__ if self.act is not None else 'No Activation' + def __repr__(self): + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ('{classname}(n_units={n_units}, ' + actstr) if self.in_channels is not None: s += ', in_channels=\'{in_channels}\'' @@ -110,21 +132,40 @@ The full implementation is as follow, which supports both static and dynamic mod s += ')' return s.format(classname=self.__class__.__name__, **self.__dict__) - def build(self, inputs_shape): # initialize the model weights here - if self.in_channels: # if the number of input channel is given, use it + def build(self, inputs_shape): + if self.in_channels is None and len(inputs_shape) != 2: + raise AssertionError("The input dimension must be rank 2, please reshape or flatten it") + if self.in_channels: shape = [self.in_channels, self.n_units] - else: # otherwise, get it from static model + else: self.in_channels = inputs_shape[1] shape = [inputs_shape[1], self.n_units] + self.W = self._get_weights("weights", shape=tuple(shape), init=self.W_init) - if self.b_init: # if b_init is None, no bias is applied - self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init) - def forward(self, inputs): - z = tf.matmul(inputs, self.W) + self.b_init_flag = False if 
self.b_init: - z = tf.add(z, self.b) + self.b = self._get_weights("biases", shape=(self.n_units, ), init=self.b_init) + self.b_init_flag = True + self.bias_add = tl.ops.BiasAdd() + + self.act_init_flag = False if self.act: + self.act_init_flag = True + + self.matmul = tl.ops.MatMul() + + def forward(self, inputs): + if self._forward_state == False: + if self._built == False: + self.build(tl.get_tensor_shape(inputs)) + self._built = True + self._forward_state = True + + z = self.matmul(inputs, self.W) + if self.b_init_flag: + z = self.bias_add(z, self.b) + if self.act_init_flag: z = self.act(z) return z @@ -136,37 +177,54 @@ We use Dropout as an example here: .. code-block:: python - class Dropout(Layer): - """ - The :class:`Dropout` class is a noise layer which randomly set some - activations to zero according to a keeping probability. - Parameters - ---------- - keep : float - The keeping probability. - The lower the probability it is, the more activations are set to zero. - name : None or str - A unique layer name. - """ - - def __init__(self, keep, name=None): - super(Dropout, self).__init__(name) - self.keep = keep - - self.build() - self._built = True - - logging.info("Dropout %s: keep: %f " % (self.name, self.keep)) - - def build(self, inputs_shape=None): - pass # no weights in dropout layer - - def forward(self, inputs): - if self.is_train: # this attribute is changed by Model.train() and Model.eval() described above - outputs = tf.nn.dropout(inputs, rate=1 - (self.keep), name=self.name) - else: - outputs = inputs - return outputs + class Dropout(Module): + """ + The :class:`Dropout` class is a noise layer which randomly set some + activations to zero according to a keeping probability. + + Parameters + ---------- + keep : float + The keeping probability. + The lower the probability it is, the more activations are set to zero. + seed : int or None + The seed for random dropout. + name : None or str + A unique layer name. 
+ + Examples + -------- + >>> net = tl.layers.Input([10, 200]) + >>> net = tl.layers.Dropout(keep=0.2)(net) + + """ + + def __init__(self, keep, seed=0, name=None): #"dropout"): + super(Dropout, self).__init__(name) + self.keep = keep + self.seed = seed + + self.build() + self._built = True + + logging.info("Dropout %s: keep: %f " % (self.name, self.keep)) + + def __repr__(self): + s = ('{classname}(keep={keep}') + if self.name is not None: + s += ', name=\'{name}\'' + s += ')' + return s.format(classname=self.__class__.__name__, **self.__dict__) + + def build(self, inputs_shape=None): + self.dropout = tl.ops.Dropout(keep=self.keep, seed=self.seed) + + def forward(self, inputs): + if self.is_train: + outputs = self.dropout(inputs) + else: + outputs = inputs + return outputs Pre-trained CNN ================ @@ -176,42 +234,14 @@ Get entire CNN .. code-block:: python - import tensorflow as tf + import tensorlayer as tl import numpy as np from tensorlayer.models.imagenet_classes import class_names + from examples.model_zoo import vgg16 - vgg = tl.models.vgg16(pretrained=True) + vgg = vgg16(pretrained=True) img = tl.vis.read_image('data/tiger.jpeg') - img = tl.prepro.imresize(img, (224, 224)).astype(np.float32) / 255 + img = tl.prepro.imresize(img, (224, 224)).astype(tl.float32) / 255 output = vgg(img, is_train=False) -Get a part of CNN ------------------- - -.. code-block:: python - - # get VGG without the last layer - cnn = tl.models.vgg16(end_with='fc2_relu', mode='static').as_layer() - # add one more layer and build a new model - ni = tl.layers.Input([None, 224, 224, 3], name="inputs") - nn = cnn(ni) - nn = tl.layers.Dense(n_units=100, name='out')(nn) - model = tl.models.Model(inputs=ni, outputs=nn) - # train your own classifier (only update the last layer) - train_weights = model.get_layer('out').all_weights - -Reuse CNN ------------------- - -.. 
code-block:: python - - # in dynamic model, we can directly use the same model - # in static model - vgg_layer = tl.models.vgg16().as_layer() - ni_1 = tl.layers.Input([None, 224, 224, 3]) - ni_2 = tl.layers.Input([None, 224, 224, 3]) - a_1 = vgg_layer(ni_1) - a_2 = vgg_layer(ni_2) - M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2]) - diff --git a/docs/user/get_start_model.rst b/docs/user/get_start_model.rst index 2337a7d..d900f68 100644 --- a/docs/user/get_start_model.rst +++ b/docs/user/get_start_model.rst @@ -5,31 +5,26 @@ Define a model =============== TensorLayer provides two ways to define a model. -Static model allows you to build model in a fluent way while dynamic model allows you to fully control the forward process. +Sequential model allows you to build model in a fluent way while dynamic model allows you to fully control the forward process. -Static model +Sequential model =============== .. code-block:: python - import tensorflow as tf - from tensorlayer.layers import Input, Dropout, Dense - from tensorlayer.models import Model - - def get_model(inputs_shape): - ni = Input(inputs_shape) - nn = Dropout(keep=0.8)(ni) - nn = Dense(n_units=800, act=tf.nn.relu, name="dense1")(nn) # “name" is optional - nn = Dropout(keep=0.8)(nn) - nn = Dense(n_units=800, act=tf.nn.relu)(nn) - nn = Dropout(keep=0.8)(nn) - nn = Dense(n_units=10, act=None)(nn) - M = Model(inputs=ni, outputs=nn, name="mlp") # “name" is optional - return M - - MLP = get_model([None, 784]) - MLP.eval() - outputs = MLP(data) + from tensorlayer.layers import SequentialLayer + from tensorlayer.layers import Dense + import tensorlayer as tl + + def get_model(): + layer_list = [] + layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1')) + layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2')) + layer_list.append(Dense(n_units=10, act=tl.ReLU, in_channels=800, name='Dense3')) + MLP = SequentialLayer(layer_list) + return MLP + + Dynamic model 
======================= @@ -39,15 +34,18 @@ In this case, you need to manually input the output shape of the previous layer .. code-block:: python - class CustomModel(Model): + import tensorlayer as tl + from tensorlayer.layers import Module + from tensorlayer.layers import Dropout, Dense + class CustomModel(Module): def __init__(self): super(CustomModel, self).__init__() self.dropout1 = Dropout(keep=0.8) - self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784) + self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784) self.dropout2 = Dropout(keep=0.8) - self.dense2 = Dense(n_units=800, act=tf.nn.relu, in_channels=800) + self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) self.dropout3 = Dropout(keep=0.8) self.dense3 = Dense(n_units=10, act=None, in_channels=800) @@ -63,73 +61,83 @@ In this case, you need to manually input the output shape of the previous layer return out MLP = CustomModel() - MLP.eval() + MLP.set_eval() outputs = MLP(data, foo=True) # controls the forward here outputs = MLP(data, foo=False) +Dynamic model do not manually input the output shape +======================= + + +In this case, you do not manually input the output shape of the previous layer to the new layer. + +.. 
code-block:: python + + import tensorlayer as tl + from tensorlayer.layers import Module + from tensorlayer.layers import Dropout, Dense + class CustomModel(Module): + + def __init__(self): + super(CustomModel, self).__init__() + + self.dropout1 = Dropout(keep=0.8) + self.dense1 = Dense(n_units=800, act=tl.ReLU) + self.dropout2 = Dropout(keep=0.8) + self.dense2 = Dense(n_units=800, act=tl.ReLU) + self.dropout3 = Dropout(keep=0.8) + self.dense3 = Dense(n_units=10, act=None) + + def forward(self, x, foo=False): + z = self.dropout1(x) + z = self.dense1(z) + z = self.dropout2(z) + z = self.dense2(z) + z = self.dropout3(z) + out = self.dense3(z) + if foo: + out = tf.nn.softmax(out) + return out + + MLP = CustomModel() + MLP.init_build(tl.layers.Input(shape=(1, 784))) # init_build must be called to initialize the weights. + MLP.set_eval() + outputs = MLP(data, foo=True) # controls the forward here + outputs = MLP(data, foo=False) + Switching train/test modes ============================= .. code-block:: python # method 1: switch before forward - Model.train() # enable dropout, batch norm moving avg ... - output = Model(train_data) + MLP.set_train() # enable dropout, batch norm moving avg ... + output = MLP(train_data) ... # training code here - Model.eval() # disable dropout, batch norm moving avg ... - output = Model(test_data) + Model.set_eval() # disable dropout, batch norm moving avg ... + output = MLP(test_data) ... # testing code here - # method 2: switch while forward - output = Model(train_data, is_train=True) - output = Model(test_data, is_train=False) + # method 2: Using packaged training modules + model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer) + model.train(n_epoch=n_epoch, train_dataset=train_ds) Reuse weights ======================= -For static model, call the layer multiple time in model creation - -.. 
code-block:: python - - # create siamese network - - def create_base_network(input_shape): - '''Base network to be shared (eq. to feature extraction). - ''' - input = Input(shape=input_shape) - x = Flatten()(input) - x = Dense(128, act=tf.nn.relu)(x) - x = Dropout(0.9)(x) - x = Dense(128, act=tf.nn.relu)(x) - x = Dropout(0.9)(x) - x = Dense(128, act=tf.nn.relu)(x) - return Model(input, x) - - - def get_siamese_network(input_shape): - """Create siamese network with shared base network as layer - """ - base_layer = create_base_network(input_shape).as_layer() # convert model as layer - - ni_1 = Input(input_shape) - ni_2 = Input(input_shape) - nn_1 = base_layer(ni_1) # call base_layer twice - nn_2 = base_layer(ni_2) - return Model(inputs=[ni_1, ni_2], outputs=[nn_1, nn_2]) - - siamese_net = get_siamese_network([None, 784]) - For dynamic model, call the layer multiple time in forward function .. code-block:: python - class MyModel(Model): + import tensorlayer as tl + from tensorlayer.layers import Module, Dense, Concat + class MyModel(Module): def __init__(self): super(MyModel, self).__init__() - self.dense_shared = Dense(n_units=800, act=tf.nn.relu, in_channels=784) - self.dense1 = Dense(n_units=10, act=tf.nn.relu, in_channels=800) - self.dense2 = Dense(n_units=10, act=tf.nn.relu, in_channels=800) + self.dense_shared = Dense(n_units=800, act=tl.ReLU, in_channels=784) + self.dense1 = Dense(n_units=10, act=tl.ReLU, in_channels=800) + self.dense2 = Dense(n_units=10, act=tl.ReLU, in_channels=800) self.cat = Concat() def forward(self, x): @@ -158,56 +166,6 @@ Print model information # (dropout_2): Dropout(keep=0.8, name='dropout_2') # (dense_2): Dense(n_units=10, None, in_channels='800', name='dense_2') # ) - - import pprint - pprint.pprint(MLP.config) # print the model architecture - # {'inputs': '_inputlayer_1_node_0', - # 'model_architecture': [{'args': {'dtype': tf.float32, - # 'layer_type': 'normal', - # 'name': '_inputlayer_1', - # 'shape': [None, 784]}, - # 'class': 
'_InputLayer', - # 'prev_layer': None}, - # {'args': {'keep': 0.8, - # 'layer_type': 'normal', - # 'name': 'dropout_1'}, - # 'class': 'Dropout', - # 'prev_layer': ['_inputlayer_1_node_0']}, - # {'args': {'act': 'relu', - # 'layer_type': 'normal', - # 'n_units': 800, - # 'name': 'dense_1'}, - # 'class': 'Dense', - # 'prev_layer': ['dropout_1_node_0']}, - # {'args': {'keep': 0.8, - # 'layer_type': 'normal', - # 'name': 'dropout_2'}, - # 'class': 'Dropout', - # 'prev_layer': ['dense_1_node_0']}, - # {'args': {'act': 'relu', - # 'layer_type': 'normal', - # 'n_units': 800, - # 'name': 'dense_2'}, - # 'class': 'Dense', - # 'prev_layer': ['dropout_2_node_0']}, - # {'args': {'keep': 0.8, - # 'layer_type': 'normal', - # 'name': 'dropout_3'}, - # 'class': 'Dropout', - # 'prev_layer': ['dense_2_node_0']}, - # {'args': {'act': None, - # 'layer_type': 'normal', - # 'n_units': 10, - # 'name': 'dense_3'}, - # 'class': 'Dense', - # 'prev_layer': ['dropout_3_node_0']}], - # 'name': 'mlp', - # 'outputs': 'dense_3_node_0', - # 'version_info': {'backend': 'tensorflow', - # 'backend_version': '2.0.0-alpha0', - # 'save_date': None, - # 'tensorlayer_version': '2.1.0', - # 'training_device': 'gpu'}} Get specific weights ======================= @@ -220,10 +178,6 @@ We can get the specific weights by indexing or naming. all_weights = MLP.all_weights some_weights = MLP.all_weights[1:3] - # naming - some_weights = MLP.get_layer('dense1').all_weights - - Save and restore model ======================= @@ -235,15 +189,17 @@ Save weights only .. code-block:: python - MLP.save_weights('model_weights.h5') # by default, file will be in hdf5 format - MLP.load_weights('model_weights.h5') + MLP.save_weights('./model_weights.npz') # by default, file will be in hdf5 format + MLP.load_weights('./model_weights.npz') -Save model architecture and weights (optional) +Save model weights (optional) ----------------------------------------------- .. 
code-block:: python - # When using Model.load(), there is no need to reimplement or declare the architecture of the model explicitly in code - MLP.save('model.h5', save_weights=True) - MLP = Model.load('model.h5', load_weights=True) + # When using packaged training modules. Saving and loading the model can be done as follows + model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer) + model.train(n_epoch=n_epoch, train_dataset=train_ds) + model.save_weights('./model.npz', format='npz_dict') + model.load_weights('./model.npz', format='npz_dict') diff --git a/docs/user/installation.rst b/docs/user/installation.rst index 3ba467f..7f178ea 100644 --- a/docs/user/installation.rst +++ b/docs/user/installation.rst @@ -15,8 +15,9 @@ Mac OX, Linux and Windows, or ask for help on `tensorlayer@gmail.com `_. -Install TensorFlow +Install Backend ========================= +TensorLayer supports multiple deep learning backends, default TensorFlow as backend also supports MindSpore and PaddlePaddle. .. code-block:: bash @@ -24,9 +25,24 @@ Install TensorFlow pip3 install tensorflow-gpu # GPU version pip3 install tensorflow # CPU version + The installation instructions of TensorFlow are written to be very detailed on `TensorFlow`_ website. However, there are something need to be considered. For example, `TensorFlow`_ officially supports GPU acceleration for Linux, Mac OX and Windows at present. For ARM processor architecture, you need to install TensorFlow from source. +If you want to use mindspore backend, you should install mindspore==1.2.1. + +.. code-block:: bash + + pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.2.1/MindSpore/gpu/ubuntu_x86/cuda-10.1/mindspore_gpu-1.2.1-cp37-cp37m-linux_x86_64.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple + + +If you want to use paddlepaddle backend, you should install paddlepaddle>=2.1.1 + +.. 
code-block:: bash + + python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple + + Install TensorLayer ========================= @@ -34,23 +50,21 @@ For stable version: .. code-block:: bash - pip3 install tensorlayer + pip3 install tensorlayer3 - pip install tensorlayer -i https://pypi.tuna.tsinghua.edu.cn/simple (faster in China) + pip install tensorlayer3 -i https://pypi.tuna.tsinghua.edu.cn/simple (faster in China) -For latest version, please install from Github. +For latest version, please install from OpenI. .. code-block:: bash - pip3 install git+https://github.com/tensorlayer/tensorlayer.git - or - pip3 install https://github.com/tensorlayer/tensorlayer/archive/master.zip + pip3 install git+https://git.openi.org.cn/TensorLayer/tensorlayer3.0.git For developers, you should clone the folder to your local machine and put it along with your project scripts. .. code-block:: bash - git clone https://github.com/tensorlayer/tensorlayer.git + git clone https://git.openi.org.cn/TensorLayer/tensorlayer3.0.git Alternatively, you can build from the source. @@ -58,7 +72,7 @@ Alternatively, you can build from the source. .. code-block:: bash # First clone the repository and change the current directory to the newly cloned repository - git clone https://github.com/tensorlayer/tensorlayer.git + git clone https://git.openi.org.cn/TensorLayer/tensorlayer3.0.git cd tensorlayer # Install virtualenv if necessary @@ -85,6 +99,12 @@ Alternatively, you can build from the source. # for a machine **with** an NVIDIA GPU pip3 install -e ".[all_gpu_dev]" +If you want install TensorLayer 2.X + +.. code-block:: bash + + [stable version] pip3 install tensorlayer==2.x.x + If you want install TensorLayer 1.X, the simplest way to install TensorLayer 1.X is as follow. It will also install the numpy and matplotlib automatically. .. 
code-block:: bash @@ -190,17 +210,6 @@ The NVIDIA CUDA® Deep Neural Network library (cuDNN) is a GPU-accelerated libra After extracting cuDNN, you will get three folders (bin, lib, include). Then these folders should be copied to CUDA installation. (The default installation directory is `C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0`) -Installing TensorLayer ------------------------- -For TensorLayer, please refer to the steps mentioned above. - -.. code-block:: bash - - pip3 install tensorflow #CPU version - pip3 install tensorflow-gpu   #GPU version (GPU version and CPU version just choose one) - pip3 install tensorlayer       #Install tensorlayer - - Issue ======= diff --git a/examples/basic_tutorials/tutorial_LayerList.py b/examples/basic_tutorials/tutorial_LayerList.py index 2b60fec..335c006 100644 --- a/examples/basic_tutorials/tutorial_LayerList.py +++ b/examples/basic_tutorials/tutorial_LayerList.py @@ -1,43 +1,38 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- -from tensorlayer.layers import LayerList -from tensorlayer.layers import Dense +from tensorlayer.layers import Module, LayerList, Dense import tensorlayer as tl -import numpy as np - -layer_list = [] -layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1')) -layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2')) -layer_list.append(Dense(n_units=10, act=tl.ReLU, in_channels=800, name='Dense3')) -MLP = LayerList(layer_list) - -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) - -def generator_train(): - inputs = X_train - targets = y_train - if len(inputs) != len(targets): - raise AssertionError("The length of inputs and targets should be equal") - for _input, _target in zip(inputs, targets): - yield (_input, np.array(_target)) - -n_epoch = 50 -batch_size = 128 -print_freq = 2 -shuffle_buffer_size = 128 - -# train_weights = MLP.trainable_weights -# print(train_weights) 
-optimizer = tl.optimizers.Momentum(0.05, 0.9) -train_ds = tl.dataflow.FromGenerator( - generator_train, output_types=(tl.float32, tl.int32) , column_names=['data', 'label'] -) -train_ds = tl.dataflow.Shuffle(train_ds,shuffle_buffer_size) -train_ds = tl.dataflow.Batch(train_ds,batch_size) - - -model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer) -model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) -model.save_weights('./model.npz', format='npz_dict') -model.load_weights('./model.npz', format='npz_dict') \ No newline at end of file + +d1 = Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1') +d2 = Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2') +d3 = Dense(n_units=10, act=tl.ReLU, in_channels=800, name='Dense3') + +layer_list = LayerList([d1, d2]) +# Inserts a given d2 before a given index in the list +layer_list.insert(1, d2) +layer_list.insert(2, d2) +# Appends d2 from a Python iterable to the end of the list. +layer_list.extend([d2]) +# Appends a given d3 to the end of the list. +layer_list.append(d3) + +print(layer_list) + + +class model(Module): + + def __init__(self): + super(model, self).__init__() + self._list = layer_list + + def forward(self, inputs): + output = self._list[0](inputs) + for i in range(1, len(self._list)): + output = self._list[i](output) + return output + + +net = model() +print(net) +print(net(tl.layers.Input((10, 784)))) diff --git a/examples/basic_tutorials/tutorial_SequentialLayer.py b/examples/basic_tutorials/tutorial_SequentialLayer.py new file mode 100644 index 0000000..1780d3f --- /dev/null +++ b/examples/basic_tutorials/tutorial_SequentialLayer.py @@ -0,0 +1,46 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'tensorflow' + +from tensorlayer.layers import SequentialLayer +from tensorlayer.layers import Dense +import tensorlayer as tl +import numpy as np + +layer_list = [] +layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1')) +layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2')) +layer_list.append(Dense(n_units=10, act=tl.ReLU, in_channels=800, name='Dense3')) +MLP = SequentialLayer(layer_list) + +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + + +def generator_train(): + inputs = X_train + targets = y_train + if len(inputs) != len(targets): + raise AssertionError("The length of inputs and targets should be equal") + for _input, _target in zip(inputs, targets): + yield (_input, np.array(_target)) + + +n_epoch = 50 +batch_size = 128 +print_freq = 2 +shuffle_buffer_size = 128 + +# train_weights = MLP.trainable_weights +# print(train_weights) +optimizer = tl.optimizers.Momentum(0.05, 0.9) +train_ds = tl.dataflow.FromGenerator( + generator_train, output_types=(tl.float32, tl.int32), column_names=['data', 'label'] +) +train_ds = tl.dataflow.Shuffle(train_ds, shuffle_buffer_size) +train_ds = tl.dataflow.Batch(train_ds, batch_size) + +model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer) +model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) +model.save_weights('./model.npz', format='npz_dict') +model.load_weights('./model.npz', format='npz_dict') diff --git a/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py b/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py new file mode 100644 index 0000000..3318b69 --- /dev/null +++ b/examples/basic_tutorials/tutorial_automatic_inference_input _shape.py @@ -0,0 +1,95 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'tensorflow' + +import numpy as np +import time +import tensorflow as tf +import tensorlayer as tl +from tensorlayer.layers import Module +from tensorlayer.layers import Dense, Dropout, BatchNorm1d + +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + + +class CustomModel(Module): + + def __init__(self): + super(CustomModel, self).__init__() + self.dropout1 = Dropout(keep=0.8) + self.dense1 = Dense(n_units=800) + self.batchnorm = BatchNorm1d(act=tl.ReLU) + self.dropout2 = Dropout(keep=0.8) + self.dense2 = Dense(n_units=800, act=tl.ReLU) + self.dropout3 = Dropout(keep=0.8) + self.dense3 = Dense(n_units=10, act=tl.ReLU) + + def forward(self, x, foo=None): + z = self.dropout1(x) + z = self.dense1(z) + z = self.batchnorm(z) + z = self.dropout2(z) + z = self.dense2(z) + z = self.dropout3(z) + out = self.dense3(z) + if foo is not None: + out = tl.ops.relu(out) + return out + + +MLP = CustomModel() +# Automatic inference input of shape. +# If Layer has no input in_channels, init_build(input) must be called to initialize the weights. 
+MLP.init_build(tl.layers.Input(shape=(1, 784))) + +n_epoch = 50 +batch_size = 500 +print_freq = 5 +train_weights = MLP.trainable_weights +optimizer = tl.optimizers.Adam(lr=0.0001) + +for epoch in range(n_epoch): ## iterate the dataset n_epoch times + start_time = time.time() + ## iterate over the entire training set once (shuffle the data via training) + for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): + MLP.set_train() # enable dropout + with tf.GradientTape() as tape: + ## compute outputs + _logits = MLP(X_batch) + ## compute loss and update model + _loss = tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='train_loss') + grad = tape.gradient(_loss, train_weights) + optimizer.apply_gradients(zip(grad, train_weights)) + + ## use training and evaluation sets to evaluate the model every print_freq epoch + if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: + print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) + train_loss, train_acc, n_iter = 0, 0, 0 + for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): + _logits = MLP(X_batch) + train_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') + train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) + n_iter += 1 + print(" train loss: {}".format(train_loss / n_iter)) + print(" train acc: {}".format(train_acc / n_iter)) + + val_loss, val_acc, n_iter = 0, 0, 0 + for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): + _logits = MLP(X_batch) # is_train=False, disable dropout + val_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') + val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) + n_iter += 1 + print(" val loss: {}".format(val_loss / n_iter)) + print(" val acc: {}".format(val_acc / n_iter)) + +## use testing data to evaluate the model +MLP.set_eval() +test_loss, test_acc, 
n_iter = 0, 0, 0 +for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False): + _logits = MLP(X_batch, foo=1) + test_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='test_loss') + test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) + n_iter += 1 +print(" test foo=1 loss: {}".format(test_loss / n_iter)) +print(" test foo=1 acc: {}".format(test_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_MS_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py similarity index 74% rename from examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_MS_backend.py rename to examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py index 02ab3e8..ae8686c 100644 --- a/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_MS_backend.py +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_mindspore_backend.py @@ -1,6 +1,9 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'mindspore' + import time import numpy as np import multiprocessing @@ -18,23 +21,23 @@ import mindspore.ops.operations as P # enable debug logging tl.logging.set_verbosity(tl.logging.DEBUG) -tl.logging.set_verbosity(tl.logging.DEBUG) + class CNN(Module): + def __init__(self): super(CNN, self).__init__() - self.conv1 = Conv2d(64, (5, 5), (2, 2), padding='SAME', b_init=None, name='conv1', in_channels=3, act=tl.ReLU, data_format='channels_first') - self.bn = BatchNorm2d(num_features=64, act=tl.ReLU, data_format='channels_first') - self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1', data_format='channels_first') - self.conv2 = Conv2d(128, (5, 5), (2, 2), padding='SAME', act=tl.ReLU, b_init=None, name='conv2', in_channels=64, data_format='channels_first') - self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2', data_format='channels_first') + self.conv1 = Conv2d(64, (5, 5), (2, 2), b_init=None, 
name='conv1', in_channels=3, act=tl.ReLU) + self.bn = BatchNorm2d(num_features=64, act=tl.ReLU) + self.maxpool1 = MaxPool2d((3, 3), (2, 2), name='pool1') + self.conv2 = Conv2d(128, (5, 5), (2, 2), act=tl.ReLU, b_init=None, name='conv2', in_channels=64) + self.maxpool2 = MaxPool2d((3, 3), (2, 2), name='pool2') self.flatten = Flatten(name='flatten') - self.dense1 = Dense(120, act=tl.ReLU, name='dense1relu', in_channels=4608) + self.dense1 = Dense(120, act=tl.ReLU, name='dense1relu', in_channels=512) self.dense2 = Dense(84, act=tl.ReLU, name='dense2relu', in_channels=120) self.dense3 = Dense(10, act=None, name='output', in_channels=84) - def forward(self, x): z = self.conv1(x) z = self.bn(z) @@ -47,14 +50,16 @@ class CNN(Module): z = self.dense3(z) return z + # training settings batch_size = 128 n_epoch = 500 shuffle_buffer_size = 128 - # prepare cifar10 data X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) + + def generator_train(): inputs = X_train targets = y_train @@ -73,6 +78,7 @@ def generator_test(): # yield _input.encode('utf-8'), _target.encode('utf-8') yield _input, _target + def _map_fn_train(img, target): # 1. Randomly crop a [height, width] section of the image. 
img = tf.image.random_crop(img, [24, 24, 3]) @@ -127,8 +133,6 @@ for epoch in range(n_epoch): for X_batch, y_batch in train_ds: X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32) y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32) - X_batch = tl.nhwc_to_nchw(X_batch) - y_batch = tl.nhwc_to_nchw(y_batch) output = net(X_batch) loss_output = criterion(output, y_batch) grads = train_network(X_batch, y_batch) @@ -141,26 +145,3 @@ for epoch in range(n_epoch): print(" train loss: {}".format(train_loss / n_iter)) print(" train acc: {}".format(train_acc / n_iter)) print(" loss ", loss) - -# start_time = time.time() - -# train_loss, train_acc, n_iter = 0, 0, 0 -# for X_batch, y_batch in train_ds: -# net.set_train() - -# with tf.GradientTape() as tape: -# # compute outputs -# _logits = net(X_batch) -# # compute loss and update model -# _loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') - -# grad = tape.gradient(_loss_ce, train_weights) -# optimizer.apply_gradients(zip(grad, train_weights)) - -# train_loss += _loss_ce -# train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) -# n_iter += 1 - -# print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) -# print(" train loss: {}".format(train_loss / n_iter)) -# print(" train acc: {}".format(train_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_paddle_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_paddle_backend.py new file mode 100644 index 0000000..b9cef9c --- /dev/null +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_paddle_backend.py @@ -0,0 +1,166 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- +# The tensorlayer and tensorflow operators can be mixed +import os +os.environ['TL_BACKEND'] = 'paddle' + +import time +import numpy as np +import multiprocessing +import tensorflow as tf +import paddle as pd +from tensorlayer.layers import Module +import tensorlayer as tl +from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d) + +# enable debug logging +tl.logging.set_verbosity(tl.logging.DEBUG) + +# prepare cifar10 data +X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) + + +class CNN(Module): + + def __init__(self): + super(CNN, self).__init__() + # weights init + W_init = tl.initializers.truncated_normal(stddev=5e-2) + W_init2 = tl.initializers.truncated_normal(stddev=0.04) + b_init2 = tl.initializers.constant(value=0.1) + + self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3) + self.bn1 = BatchNorm2d(num_features=64, act=tl.ReLU) + self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1') + + self.conv2 = Conv2d( + 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv2', in_channels=64 + ) + self.bn2 = BatchNorm2d(num_features=64, act=tl.ReLU) + self.maxpool2 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2') + + self.flatten = Flatten(name='flatten') + self.dense1 = Dense(384, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense1relu', in_channels=2304) + self.dense2 = Dense(192, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense2relu', in_channels=384) + self.dense3 = Dense(10, act=None, W_init=W_init2, name='output', in_channels=192) + + def forward(self, x): + z = self.conv1(x) + z = self.bn1(z) + z = self.maxpool1(z) + z = self.conv2(z) + z = self.bn2(z) + z = self.maxpool2(z) + z = self.flatten(z) + z = self.dense1(z) + z = self.dense2(z) + z = self.dense3(z) + return z + + +def generator_train(): + inputs = X_train + targets 
= y_train + if len(inputs) != len(targets): + raise AssertionError("The length of inputs and targets should be equal") + for _input, _target in zip(inputs, targets): + # yield _input.encode('utf-8'), _target.encode('utf-8') + yield _input, _target + + +def generator_test(): + inputs = X_test + targets = y_test + if len(inputs) != len(targets): + raise AssertionError("The length of inputs and targets should be equal") + for _input, _target in zip(inputs, targets): + # yield _input.encode('utf-8'), _target.encode('utf-8') + yield _input, _target + + +def _map_fn_train(img, target): + # 1. Randomly crop a [height, width] section of the image. + img = tf.image.random_crop(img, [24, 24, 3]) + # 2. Randomly flip the image horizontally. + img = tf.image.random_flip_left_right(img) + # 3. Randomly change brightness. + img = tf.image.random_brightness(img, max_delta=63) + # 4. Randomly change contrast. + img = tf.image.random_contrast(img, lower=0.2, upper=1.8) + # 5. Subtract off the mean and divide by the variance of the pixels. + img = tf.image.per_image_standardization(img) + target = tf.reshape(target, ()) + return img, target + + +def _map_fn_test(img, target): + # 1. Crop the central [height, width] of the image. + img = tf.image.resize_with_pad(img, 24, 24) + # 2. Subtract off the mean and divide by the variance of the pixels. + img = tf.image.per_image_standardization(img) + img = tf.reshape(img, (24, 24, 3)) + target = tf.reshape(target, ()) + return img, target + + +# get the network +net = CNN() + +# training settings +batch_size = 128 +n_epoch = 500 +learning_rate = 0.0001 +print_freq = 5 +shuffle_buffer_size = 128 +metrics = tl.metric.Accuracy() + +train_weights = net.trainable_weights +optimizer = tl.optimizers.Adam(learning_rate) +# looking for decay learning rate? 
see https://github.com/tensorlayer/srgan/blob/master/train.py + +# dataset API and augmentation +train_ds = tf.data.Dataset.from_generator( + generator_train, output_types=(tf.float32, tf.int32) +) # , output_shapes=((24, 24, 3), (1))) +train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count()) +# train_ds = train_ds.repeat(n_epoch) +train_ds = train_ds.shuffle(shuffle_buffer_size) +train_ds = train_ds.prefetch(buffer_size=4096) +train_ds = train_ds.batch(batch_size) +# value = train_ds.make_one_shot_iterator().get_next() + +test_ds = tf.data.Dataset.from_generator( + generator_test, output_types=(tf.float32, tf.int32) +) # , output_shapes=((24, 24, 3), (1))) +# test_ds = test_ds.shuffle(shuffle_buffer_size) +test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count()) +# test_ds = test_ds.repeat(n_epoch) +test_ds = test_ds.prefetch(buffer_size=4096) +test_ds = test_ds.batch(batch_size) +# value_test = test_ds.make_one_shot_iterator().get_next() + +for epoch in range(n_epoch): + train_loss, train_acc, n_iter = 0, 0, 0 + for X_batch, y_batch in test_ds: + start_time = time.time() + X_batch = pd.to_tensor(X_batch.numpy(), dtype=tl.float32) + y_batch = pd.to_tensor(y_batch.numpy(), dtype=tl.int64) + net.set_train() + + output = net(X_batch) + loss = pd.nn.functional.cross_entropy(output, y_batch) + loss_ce = loss.numpy() + params_grads = optimizer.gradient(loss, train_weights) + optimizer.apply_gradients(params_grads) + + train_loss += loss_ce + + if metrics: + metrics.update(output, y_batch) + train_acc += metrics.result() + metrics.reset() + n_iter += 1 + + print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) + print(" train loss: {}".format(train_loss / n_iter)) + print(" train acc: {}".format(train_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_TF_backend.py b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py similarity index 93% 
rename from examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_TF_backend.py rename to examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py index f399bef..d3619dc 100644 --- a/examples/basic_tutorials/tutorial_cifar10_cnn_dynamic_TF_backend.py +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorflow_backend.py @@ -1,5 +1,8 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- +# The tensorlayer and tensorflow operators can be mixed +import os +os.environ['TL_BACKEND'] = 'tensorflow' import time import numpy as np @@ -12,7 +15,6 @@ from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d) # enable debug logging tl.logging.set_verbosity(tl.logging.DEBUG) -tl.logging.set_verbosity(tl.logging.DEBUG) # prepare cifar10 data X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) @@ -148,7 +150,7 @@ for epoch in range(n_epoch): # compute outputs _logits = net(X_batch) # compute loss and update model - _loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') + _loss_ce = tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='train_loss') grad = tape.gradient(_loss_ce, train_weights) optimizer.apply_gradients(zip(grad, train_weights)) @@ -164,22 +166,22 @@ for epoch in range(n_epoch): # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - net.eval() + net.set_eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') + val_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" val loss: {}".format(val_loss / n_iter)) print(" val acc: {}".format(val_acc / n_iter)) # use testing data to evaluate the model 
-net.eval() +net.set_eval() test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') + test_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='test_loss') test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" test loss: {}".format(test_loss / n_iter)) diff --git a/examples/basic_tutorials/tutorial_cifar10_cnn_tensorlayer.py b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorlayer.py new file mode 100644 index 0000000..de592fa --- /dev/null +++ b/examples/basic_tutorials/tutorial_cifar10_cnn_tensorlayer.py @@ -0,0 +1,181 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import os +# os.environ['TL_BACKEND'] = 'paddle' +os.environ['TL_BACKEND'] = 'tensorflow' +# os.environ['TL_BACKEND'] = 'mindspore' + +import time +import multiprocessing +import tensorflow as tf + +from tensorlayer.models import TrainOneStep +from tensorlayer.layers import Module +import tensorlayer as tl +from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d) + +# enable debug logging +tl.logging.set_verbosity(tl.logging.DEBUG) + +# prepare cifar10 data +X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) + + +class CNN(Module): + + def __init__(self): + super(CNN, self).__init__() + # weights init + W_init = tl.initializers.truncated_normal(stddev=5e-2) + W_init2 = tl.initializers.truncated_normal(stddev=0.04) + b_init2 = tl.initializers.constant(value=0.1) + + self.conv1 = Conv2d(64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='conv1', in_channels=3) + self.bn = BatchNorm2d(num_features=64, act=tl.ReLU) + self.maxpool1 = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1') + + self.conv2 = Conv2d( + 64, (5, 5), (1, 1), padding='SAME', act=tl.ReLU, W_init=W_init, b_init=None, name='conv2', in_channels=64 + ) + self.maxpool2 = 
MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2') + + self.flatten = Flatten(name='flatten') + self.dense1 = Dense(384, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense1relu', in_channels=2304) + self.dense2 = Dense(192, act=tl.ReLU, W_init=W_init2, b_init=b_init2, name='dense2relu', in_channels=384) + self.dense3 = Dense(10, act=None, W_init=W_init2, name='output', in_channels=192) + + def forward(self, x): + z = self.conv1(x) + z = self.bn(z) + z = self.maxpool1(z) + z = self.conv2(z) + z = self.maxpool2(z) + z = self.flatten(z) + z = self.dense1(z) + z = self.dense2(z) + z = self.dense3(z) + return z + + +# get the network +net = CNN() + +# training settings +batch_size = 128 +n_epoch = 500 +learning_rate = 0.0001 +print_freq = 5 +n_step_epoch = int(len(y_train) / batch_size) +n_step = n_epoch * n_step_epoch +shuffle_buffer_size = 128 + +train_weights = net.trainable_weights +optimizer = tl.optimizers.Adam(learning_rate) +# looking for decay learning rate? see https://github.com/tensorlayer/srgan/blob/master/train.py +metrics = tl.metric.Accuracy() + + +def generator_train(): + inputs = X_train + targets = y_train + if len(inputs) != len(targets): + raise AssertionError("The length of inputs and targets should be equal") + for _input, _target in zip(inputs, targets): + # yield _input.encode('utf-8'), _target.encode('utf-8') + yield _input, _target + + +def generator_test(): + inputs = X_test + targets = y_test + if len(inputs) != len(targets): + raise AssertionError("The length of inputs and targets should be equal") + for _input, _target in zip(inputs, targets): + # yield _input.encode('utf-8'), _target.encode('utf-8') + yield _input, _target + + +def _map_fn_train(img, target): + # 1. Randomly crop a [height, width] section of the image. + img = tf.image.random_crop(img, [24, 24, 3]) + # 2. Randomly flip the image horizontally. + img = tf.image.random_flip_left_right(img) + # 3. Randomly change brightness. 
+ img = tf.image.random_brightness(img, max_delta=63) + # 4. Randomly change contrast. + img = tf.image.random_contrast(img, lower=0.2, upper=1.8) + # 5. Subtract off the mean and divide by the variance of the pixels. + img = tf.image.per_image_standardization(img) + target = tf.reshape(target, ()) + return img, target + + +def _map_fn_test(img, target): + # 1. Crop the central [height, width] of the image. + img = tf.image.resize_with_pad(img, 24, 24) + # 2. Subtract off the mean and divide by the variance of the pixels. + img = tf.image.per_image_standardization(img) + img = tf.reshape(img, (24, 24, 3)) + target = tf.reshape(target, ()) + return img, target + + +# dataset API and augmentation +train_ds = tf.data.Dataset.from_generator( + generator_train, output_types=(tf.float32, tf.int32) +) # , output_shapes=((24, 24, 3), (1))) +train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count()) +# train_ds = train_ds.repeat(n_epoch) +train_ds = train_ds.shuffle(shuffle_buffer_size) +train_ds = train_ds.prefetch(buffer_size=4096) +train_ds = train_ds.batch(batch_size) +# value = train_ds.make_one_shot_iterator().get_next() + +test_ds = tf.data.Dataset.from_generator( + generator_test, output_types=(tf.float32, tf.int32) +) # , output_shapes=((24, 24, 3), (1))) +# test_ds = test_ds.shuffle(shuffle_buffer_size) +test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count()) +# test_ds = test_ds.repeat(n_epoch) +test_ds = test_ds.prefetch(buffer_size=4096) +test_ds = test_ds.batch(batch_size) +# value_test = test_ds.make_one_shot_iterator().get_next() + + +class WithLoss(Module): + + def __init__(self, net, loss_fn): + super(WithLoss, self).__init__() + self._net = net + self._loss_fn = loss_fn + + def forward(self, data, label): + out = self._net(data) + loss = self._loss_fn(out, label) + return loss + + +net_with_loss = WithLoss(net, loss_fn=tl.cost.softmax_cross_entropy_with_logits) +net_with_train = 
TrainOneStep(net_with_loss, optimizer, train_weights) + +for epoch in range(n_epoch): + start_time = time.time() + net.set_train() + train_loss, train_acc, n_iter = 0, 0, 0 + for X_batch, y_batch in train_ds: + + X_batch = tl.ops.convert_to_tensor(X_batch.numpy(), dtype=tl.float32) + y_batch = tl.ops.convert_to_tensor(y_batch.numpy(), dtype=tl.int64) + + _loss_ce = net_with_train(X_batch, y_batch) + train_loss += _loss_ce + + n_iter += 1 + _logits = net(X_batch) + metrics.update(_logits, y_batch) + train_acc += metrics.result() + metrics.reset() + print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) + print(" train loss: {}".format(train_loss / n_iter)) + print(" train acc: {}".format(train_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_dataflow.py b/examples/basic_tutorials/tutorial_dataflow.py new file mode 100644 index 0000000..57e1cd2 --- /dev/null +++ b/examples/basic_tutorials/tutorial_dataflow.py @@ -0,0 +1,84 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import os +os.environ['TL_BACKEND'] = 'tensorflow' +# os.environ['TL_BACKEND'] = 'mindspore' +# os.environ['TL_BACKEND'] = 'paddle' + +import tensorlayer as tl +from tensorlayer.layers import Module +from tensorlayer.layers import Dense, Flatten +from tensorlayer.vision.transforms import Normalize, Compose +from tensorlayer.dataflow import Dataset, IterableDataset + +transform = Compose([Normalize(mean=[127.5], std=[127.5], data_format='HWC')]) + +print('download training data and load training data') + +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) +X_train = X_train * 255 + +print('load finished') + + +class mnistdataset(Dataset): + + def __init__(self, data=X_train, label=y_train, transform=transform): + self.data = data + self.label = label + self.transform = transform + + def __getitem__(self, index): + data = self.data[index].astype('float32') + data = self.transform(data) + label = 
self.label[index].astype('int64') + + return data, label + + def __len__(self): + + return len(self.data) + + +class mnistdataset1(IterableDataset): + + def __init__(self, data=X_train, label=y_train, transform=transform): + self.data = data + self.label = label + self.transform = transform + + def __iter__(self): + + for i in range(len(self.data)): + data = self.data[i].astype('float32') + data = self.transform(data) + label = self.label[i].astype('int64') + yield data, label + + +class MLP(Module): + + def __init__(self): + super(MLP, self).__init__() + self.linear1 = Dense(n_units=120, in_channels=784, act=tl.ReLU) + self.linear2 = Dense(n_units=84, in_channels=120, act=tl.ReLU) + self.linear3 = Dense(n_units=10, in_channels=84) + self.flatten = Flatten() + + def forward(self, x): + x = self.flatten(x) + x = self.linear1(x) + x = self.linear2(x) + x = self.linear3(x) + return x + + +train_dataset = mnistdataset1(data=X_train, label=y_train, transform=transform) +train_dataset = tl.dataflow.FromGenerator( + train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label'] +) +train_loader = tl.dataflow.Dataloader(train_dataset, batch_size=128, shuffle=False) + +for i in train_loader: + print(i[0].shape, i[1]) diff --git a/examples/basic_tutorials/tutorial_mnist_gan_tensorlayer.py b/examples/basic_tutorials/tutorial_mnist_gan_tensorlayer.py new file mode 100644 index 0000000..c76a8e3 --- /dev/null +++ b/examples/basic_tutorials/tutorial_mnist_gan_tensorlayer.py @@ -0,0 +1,154 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- + +import os +# os.environ['TL_BACKEND'] = 'paddle' +os.environ['TL_BACKEND'] = 'tensorflow' +# os.environ['TL_BACKEND'] = 'mindspore' + +import time +import numpy as np +import tensorlayer as tl +from tensorlayer.layers import Module, Dense +from tensorlayer.dataflow import Dataset +from tensorlayer.models import TrainOneStep + +X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + + +class mnistdataset(Dataset): + + def __init__(self, data=X_train, label=y_train): + self.data = data + self.label = label + + def __getitem__(self, index): + data = self.data[index].astype('float32') + label = self.label[index].astype('int64') + return data, label + + def __len__(self): + return len(self.data) + + +batch_size = 128 +train_dataset = mnistdataset(data=X_train, label=y_train) +train_dataset = tl.dataflow.FromGenerator( + train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label'] +) +train_loader = tl.dataflow.Dataloader(train_dataset, batch_size=batch_size, shuffle=True) + + +class generator(Module): + + def __init__(self): + super(generator, self).__init__() + self.g_fc1 = Dense(n_units=256, in_channels=100, act=tl.ReLU) + self.g_fc2 = Dense(n_units=256, in_channels=256, act=tl.ReLU) + self.g_fc3 = Dense(n_units=784, in_channels=256, act=tl.Tanh) + + def forward(self, x): + out = self.g_fc1(x) + out = self.g_fc2(out) + out = self.g_fc3(out) + return out + + +class discriminator(Module): + + def __init__(self): + super(discriminator, self).__init__() + self.d_fc1 = Dense(n_units=256, in_channels=784, act=tl.LeakyReLU) + self.d_fc2 = Dense(n_units=256, in_channels=256, act=tl.LeakyReLU) + self.d_fc3 = Dense(n_units=1, in_channels=256, act=tl.Sigmoid) + + def forward(self, x): + out = self.d_fc1(x) + out = self.d_fc2(out) + out = self.d_fc3(out) + return out + + +G = generator() +D = discriminator() + + +class WithLossG(Module): + + def __init__(self, G, D, loss_fn): 
+ super(WithLossG, self).__init__() + self.g_net = G + self.d_net = D + self.loss_fn = loss_fn + + def forward(self, g_data, label): + fake_image = self.g_net(g_data) + logits_fake = self.d_net(fake_image) + valid = tl.convert_to_tensor(np.ones(logits_fake.shape), dtype=tl.float32) + loss = self.loss_fn(logits_fake, valid) + return loss + + +class WithLossD(Module): + + def __init__(self, G, D, loss_fn): + super(WithLossD, self).__init__() + self.g_net = G + self.d_net = D + self.loss_fn = loss_fn + + def forward(self, real_data, g_data): + logits_real = self.d_net(real_data) + fake_image = self.g_net(g_data) + logits_fake = self.d_net(fake_image) + + valid = tl.convert_to_tensor(np.ones(logits_real.shape), dtype=tl.float32) + fake = tl.convert_to_tensor(np.zeros(logits_fake.shape), dtype=tl.float32) + + loss = self.loss_fn(logits_real, valid) + self.loss_fn(logits_fake, fake) + return loss + + +# loss_fn = tl.cost.sigmoid_cross_entropy +# optimizer = tl.optimizers.Momentum(learning_rate=5e-4, momentum=0.5) +loss_fn = tl.cost.mean_squared_error +optimizer_g = tl.optimizers.Adam(learning_rate=3e-4, beta_1=0.5, beta_2=0.999) +optimizer_d = tl.optimizers.Adam(learning_rate=3e-4) + +g_weights = G.trainable_weights +d_weights = D.trainable_weights +net_with_loss_G = WithLossG(G, D, loss_fn) +net_with_loss_D = WithLossD(G, D, loss_fn) +train_one_setp_g = TrainOneStep(net_with_loss_G, optimizer_g, g_weights) +train_one_setp_d = TrainOneStep(net_with_loss_D, optimizer_d, d_weights) +n_epoch = 50 + + +def plot_fake_image(fake_image, num): + fake_image = tl.reshape(fake_image, shape=(num, 28, 28)) + fake_image = tl.convert_to_numpy(fake_image) + import matplotlib.pylab as plt + for i in range(num): + plt.subplot(int(np.sqrt(num)), int(np.sqrt(num)), i + 1) + plt.imshow(fake_image[i]) + plt.show() + + +for epoch in range(n_epoch): + d_loss, g_loss = 0.0, 0.0 + n_iter = 0 + start_time = time.time() + for data, label in train_loader: + noise = 
tl.convert_to_tensor(np.random.random(size=(batch_size, 100)), dtype=tl.float32) + + _loss_d = train_one_setp_d(data, noise) + _loss_g = train_one_setp_g(noise, label) + d_loss += _loss_d + g_loss += _loss_g + + n_iter += 1 + print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) + print(" d loss: {}".format(d_loss / n_iter)) + print(" g loss: {}".format(g_loss / n_iter)) + fake_image = G(tl.convert_to_tensor(np.random.random(size=(36, 100)), dtype=tl.float32)) + plot_fake_image(fake_image, 36) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py b/examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py deleted file mode 100644 index 9c06ec5..0000000 --- a/examples/basic_tutorials/tutorial_mnist_mlp_dynamci_dragon.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import os -os.environ['TL_BACKEND'] = 'dragon' - -from tensorlayer.layers import Module -from tensorlayer.layers import Dense -import tensorlayer as tl -import dragon as dg -import time -import argparse -import numpy as np - -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) - - -class CustomModel(Module): - - def __init__(self): - super(CustomModel, self).__init__() - self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784) - self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800) - self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800) - - def forward(self, x, foo=None): - z = self.dense1(x) - z = self.dense2(z) - out = self.dense3(z) - return out - - -def parse_args(): - """Parse the arguments.""" - parser = argparse.ArgumentParser(description='Train a cifar10 resnet') - parser.add_argument('--execution', default='EAGER_MODE', type=str, help='The execution mode') - parser.add_argument('--seed', default=1337, type=int, help='The random seed') - parser.add_argument('--cuda', default=-1, type=int, help='The cuda device to use') - return 
parser.parse_args() - - -class Classifier(object): - """The base classifier class.""" - - # TensorSpec for graph execution - image_spec = dg.Tensor([None, 3, 32, 32], 'float32') - label_spec = dg.Tensor([None], 'int64') - - def __init__(self, optimizer): - super(Classifier, self).__init__() - self.net = CustomModel() - self.optimizer = optimizer - self.params = self.net.trainable_weights - - def step(self, image, label): - with dg.GradientTape() as tape: - logit = self.net(image) - # logit = dg.cast(logit, 'float64') - logit = dg.cast(dg.math.argmax(logit, -1), 'int64') - label = dg.cast(label, 'int64') - # print("logit :\n", logit, label) - # loss = dg.losses.smooth_l1_loss([logit, label]) - loss = dg.math.sum(logit - label) # dg.losses.sparse_softmax_cross_entropy([logit, label]) - accuracy = dg.math.mean(dg.math.equal([logit, label]).astype('float32')) - grads = tape.gradient(loss, self.params) - self.optimizer.apply_gradients(zip(self.params, grads)) - return loss, accuracy, self.optimizer - - -if __name__ == '__main__': - args = parse_args() - dg.logging.info('Called with args:\n' + str(args)) - - np.random.seed(args.seed) - dg.autograph.set_execution(args.execution) - dg.cuda.set_default_device(args.cuda) - - # Define the model - model = Classifier(dg.optimizers.SGD(base_lr=0.01, momentum=0.9, weight_decay=1e-4)) - - # Compile for graph execution if necessary - if args.execution == 'GRAPH_MODE': - model.step = dg.function( - func=model.step, - input_signature=[model.image_spec, model.label_spec], - ) - - # Main loop - import tensorflow as tf - batch_size = 200 - for i in range(50): - for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): - image = dg.EagerTensor(X_batch, copy=False) - label = dg.EagerTensor(y_batch, copy=False, dtype='float32') - loss, accuracy, _ = model.step(image, label) - if i % 20 == 0: - dg.logging.info( - 'Iteration %d, lr = %s, loss = %.5f, accuracy = %.3f' % - (i, str(model.optimizer.base_lr), 
loss, accuracy) - ) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py deleted file mode 100644 index 3e552d3..0000000 --- a/examples/basic_tutorials/tutorial_mnist_mlp_mindspore.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import numpy as np -import mindspore.nn as nn -import mindspore.ops.operations as P -from mindspore.ops import composite as C -from mindspore.common import dtype as mstype -from mindspore import context, Tensor, ParameterTuple -from mindspore.common.initializer import TruncatedNormal -from mindspore.nn import Dense, WithLossCell, SoftmaxCrossEntropyWithLogits, Momentum -import tensorlayer as tl -import mindspore as ms -import tensorflow as tf -import time - -context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") - - -def fc_with_initialize(input_channels, out_channels): - """weight initial for fc layer""" - weight = weight_variable() - bias = weight_variable() - return nn.Dense(input_channels, out_channels, weight, bias) - - -def weight_variable(): - """weight initial""" - return TruncatedNormal(0.02) - - -class MLP(nn.Cell): - """ - Lenet network - Args: - num_class (int): Num classes. Default: 10. 
- - Returns: - Tensor, output tensor - - Examples: - >>> MLP(num_class=10) - """ - - def __init__(self, num_class=10): - super(MLP, self).__init__() - self.num_class = num_class - self.fc1 = fc_with_initialize(784, 800) - self.fc2 = fc_with_initialize(800, 800) - self.fc3 = fc_with_initialize(800, self.num_class) - self.relu = nn.ReLU() - - def construct(self, x): - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.relu(x) - x = self.fc3(x) - return x - - -class GradWrap(nn.Cell): - """ GradWrap definition """ - - def __init__(self, network): - super(GradWrap, self).__init__(auto_prefix=False) - self.network = network - self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters())) - - def construct(self, x, label): - weights = self.weights - return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label) - - -def generator_train(): - inputs = X_train - targets = y_train - if len(inputs) != len(targets): - raise AssertionError("The length of inputs and targets should be equal") - for _input, _target in zip(inputs, targets): - yield _input, _target - - -net = MLP() -optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9) -criterion = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) -net_with_criterion = WithLossCell(net, criterion) -train_network = GradWrap(net_with_criterion) -train_network.set_train() - -X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) -train_ds = tf.data.Dataset.from_generator(generator_train, output_types=(tf.float32, tf.int32)) -shuffle_buffer_size = 128 -batch_size = 128 -train_ds = train_ds.shuffle(shuffle_buffer_size) -train_ds = train_ds.batch(batch_size) -n_epoch = 50 - -for epoch in range(n_epoch): - start_time = time.time() - train_network.set_train() - train_loss, train_acc, n_iter = 0, 0, 0 - for X_batch, y_batch in train_ds: - X_batch = ms.Tensor(X_batch.numpy(), dtype=ms.float32) 
- y_batch = ms.Tensor(y_batch.numpy(), dtype=ms.int32) - output = net(X_batch) - loss_output = criterion(output, y_batch) - grads = train_network(X_batch, y_batch) - success = optimizer(grads) - loss = loss_output.asnumpy() - train_loss += loss - n_iter += 1 - # train_acc += np.mean((P.Equal()(P.Argmax(axis=1)(output), y_batch).asnumpy())) - print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) - print(" train loss: {}".format(train_loss / n_iter)) - # print(" train acc: {}".format(train_acc / n_iter)) - print(" triain weights ", train_network.trainable_params()[0].data) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_MS_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py similarity index 91% rename from examples/basic_tutorials/tutorial_mnist_mlp_dynamic_MS_backend.py rename to examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py index e480221..d23d785 100644 --- a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_MS_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_mindspore_backend.py @@ -1,12 +1,12 @@ -#!/usr/bin/env python3 +#! 
/usr/bin/python # -*- coding: utf-8 -*- -import mindspore.nn as nn +import os +os.environ['TL_BACKEND'] = 'mindspore' + import mindspore.ops.operations as P from mindspore.ops import composite as C -from mindspore.common import dtype as mstype -from mindspore import context, Tensor, ParameterTuple -from mindspore.common.initializer import TruncatedNormal -from mindspore.nn import SoftmaxCrossEntropyWithLogits, Momentum, WithLossCell +from mindspore import ParameterTuple +from mindspore.nn import Momentum, WithLossCell import numpy as np import tensorlayer as tl diff --git a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py b/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py similarity index 57% rename from examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py rename to examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py index ce02d34..c93cc87 100644 --- a/examples/basic_tutorials/tutorial_paddle_tensorlayer_mlp.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_paddlepaddle_backend.py @@ -1,19 +1,23 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- +# The tensorlayer and Paddle operators can be mixed import os os.environ['TL_BACKEND'] = 'paddle' -# os.environ['TL_BACKEND'] = 'tensorflow' import tensorlayer as tl from tensorlayer.layers import Module from tensorlayer.layers import Dense, Flatten +import paddle +from paddle.io import TensorDataset print('download training data and load training data') X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) print('load finished') +X_train = paddle.to_tensor(X_train.astype('float32')) +y_train = paddle.to_tensor(y_train.astype('int64')) class MLP(Module): @@ -33,11 +37,16 @@ class MLP(Module): return x -traindataset = tl.dataflow.FromSlices((X_train, y_train)) -train_loader = tl.dataflow.Dataloader(traindataset, batch_size=64, shuffle=True) +traindataset = paddle.io.TensorDataset([X_train, y_train]) +train_loader = paddle.io.DataLoader(traindataset, batch_size=64, shuffle=True) net = MLP() optimizer = tl.optimizers.Adam(learning_rate=0.001) metric = tl.metric.Accuracy() -model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) -model.train(n_epoch=20, train_dataset=train_loader, print_freq=5, print_train_batch=True) +model = tl.models.Model( + network=net, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric +) +model.train(n_epoch=2, train_dataset=train_loader, print_freq=5, print_train_batch=True) +model.save_weights('./model_mlp.npz', format='npz_dict') +model.load_weights('./model_mlp.npz', format='npz_dict') +# model.eval(train_loader) diff --git a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_TF_backend.py b/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py similarity index 82% rename from examples/basic_tutorials/tutorial_mnist_mlp_dynamic_TF_backend.py rename to examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py index 1287391..2ed6771 100644 --- 
a/examples/basic_tutorials/tutorial_mnist_mlp_dynamic_TF_backend.py +++ b/examples/basic_tutorials/tutorial_mnist_mlp_tensorflow_backend.py @@ -1,5 +1,8 @@ -#!/usr/bin/env python3 +#! /usr/bin/python # -*- coding: utf-8 -*- +# The tensorlayer and tensorflow operators can be mixed +import os +os.environ['TL_BACKEND'] = 'tensorflow' import numpy as np import time @@ -53,18 +56,17 @@ for epoch in range(n_epoch): ## iterate the dataset n_epoch times ## compute outputs _logits = MLP(X_batch) ## compute loss and update model - _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') + _loss = tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='train_loss') grad = tape.gradient(_loss, train_weights) optimizer.apply_gradients(zip(grad, train_weights)) ## use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - MLP.set_train() print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) train_loss, train_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False): _logits = MLP(X_batch) - train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') + train_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" train loss: {}".format(train_loss / n_iter)) @@ -73,19 +75,19 @@ for epoch in range(n_epoch): ## iterate the dataset n_epoch times val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False): _logits = MLP(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') + val_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" val loss: 
{}".format(val_loss / n_iter)) print(" val acc: {}".format(val_acc / n_iter)) ## use testing data to evaluate the model -MLP.eval() +MLP.set_eval() test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False): _logits = MLP(X_batch, foo=1) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') + test_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='test_loss') test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 -print(" test foo=1 loss: {}".format(val_loss / n_iter)) -print(" test foo=1 acc: {}".format(val_acc / n_iter)) +print(" test foo=1 loss: {}".format(test_loss / n_iter)) +print(" test foo=1 acc: {}".format(test_acc / n_iter)) diff --git a/examples/basic_tutorials/tutorial_mnist_simple.py b/examples/basic_tutorials/tutorial_mnist_simple.py index 4d2bc7c..8929b9c 100644 --- a/examples/basic_tutorials/tutorial_mnist_simple.py +++ b/examples/basic_tutorials/tutorial_mnist_simple.py @@ -1,18 +1,37 @@ -#!/usr/bin/env python3 +#! 
/usr/bin/python # -*- coding: utf-8 -*- -import numpy as np +# The same set of code can switch the backend with one line import os -os.environ['TL_BACKEND'] = 'tensorflow' +# os.environ['TL_BACKEND'] = 'tensorflow' # os.environ['TL_BACKEND'] = 'mindspore' +os.environ['TL_BACKEND'] = 'paddle' import tensorlayer as tl from tensorlayer.layers import Module from tensorlayer.layers import Dense, Dropout +from tensorlayer.dataflow import Dataset X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) +class mnistdataset(Dataset): + + def __init__(self, data=X_train, label=y_train): + self.data = data + self.label = label + + def __getitem__(self, index): + data = self.data[index].astype('float32') + label = self.label[index].astype('int64') + + return data, label + + def __len__(self): + + return len(self.data) + + class CustomModel(Module): def __init__(self): @@ -27,7 +46,6 @@ class CustomModel(Module): def forward(self, x, foo=None): z = self.dropout1(x) z = self.dense1(z) - # z = self.bn(z) z = self.dropout2(z) z = self.dense2(z) z = self.dropout3(z) @@ -37,32 +55,23 @@ class CustomModel(Module): return out -def generator_train(): - inputs = X_train - targets = y_train - if len(inputs) != len(targets): - raise AssertionError("The length of inputs and targets should be equal") - for _input, _target in zip(inputs, targets): - yield (_input, np.array(_target)) - - MLP = CustomModel() n_epoch = 50 batch_size = 128 print_freq = 2 -shuffle_buffer_size = 128 train_weights = MLP.trainable_weights optimizer = tl.optimizers.Momentum(0.05, 0.9) -train_ds = tl.dataflow.FromGenerator( - generator_train, output_types=(tl.float32, tl.int32) , column_names=['data', 'label'] +metric = tl.metric.Accuracy() +loss_fn = tl.cost.softmax_cross_entropy_with_logits +train_dataset = mnistdataset(data=X_train, label=y_train) +train_dataset = tl.dataflow.FromGenerator( + train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label'] ) 
-train_ds = tl.dataflow.Shuffle(train_ds,shuffle_buffer_size) -train_ds = tl.dataflow.Batch(train_ds,batch_size) - +train_loader = tl.dataflow.Dataloader(train_dataset, batch_size=batch_size, shuffle=True) -model = tl.models.Model(network=MLP, loss_fn=tl.cost.cross_entropy, optimizer=optimizer) -model.train(n_epoch=n_epoch, train_dataset=train_ds, print_freq=print_freq, print_train_batch=False) +model = tl.models.Model(network=MLP, loss_fn=loss_fn, optimizer=optimizer, metrics=metric) +model.train(n_epoch=n_epoch, train_dataset=train_loader, print_freq=print_freq, print_train_batch=False) model.save_weights('./model.npz', format='npz_dict') model.load_weights('./model.npz', format='npz_dict') diff --git a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py index 27ae9be..24c3574 100644 --- a/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py +++ b/examples/basic_tutorials/tutorial_nested_usage_of_Layer.py @@ -1,5 +1,7 @@ -#!/usr/bin/env python3 +#! 
/usr/bin/python # -*- coding: utf-8 -*- +import os +os.environ['TL_BACKEND'] = 'tensorflow' import time import numpy as np @@ -12,7 +14,9 @@ from tensorlayer.layers import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d, X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) + class Block(Module): + def __init__(self, in_channels): super(Block, self).__init__() self.dense1 = Dense(in_channels=in_channels, n_units=256) @@ -83,6 +87,7 @@ class CNN(Module): # get the network net = CNN() +print(net) # training settings batch_size = 128 n_epoch = 500 @@ -173,7 +178,7 @@ for epoch in range(n_epoch): # compute outputs _logits = net(X_batch) # compute loss and update model - _loss_ce = tl.cost.cross_entropy(_logits, y_batch, name='train_loss') + _loss_ce = tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='train_loss') grad = tape.gradient(_loss_ce, train_weights) optimizer.apply_gradients(zip(grad, train_weights)) @@ -189,23 +194,23 @@ for epoch in range(n_epoch): # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - net.eval() + net.set_eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) # is_train=False, disable dropout - val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') + val_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='eval_loss') val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" val loss: {}".format(val_loss / n_iter)) print(" val acc: {}".format(val_acc / n_iter)) # use testing data to evaluate the model -net.eval() +net.set_eval() test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) - test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') + test_loss += tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch, name='test_loss') test_acc += 
np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 print(" test loss: {}".format(test_loss / n_iter)) -print(" test acc: {}".format(test_acc / n_iter)) \ No newline at end of file +print(" test acc: {}".format(test_acc / n_iter)) diff --git a/examples/model_zoo/__init__.py b/examples/model_zoo/__init__.py index e69de29..3a26a8e 100644 --- a/examples/model_zoo/__init__.py +++ b/examples/model_zoo/__init__.py @@ -0,0 +1,6 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +from .vgg import vgg16, vgg19 +from .yolo import YOLOv4 +from .resnet import ResNet50 diff --git a/examples/model_zoo/common.py b/examples/model_zoo/common.py index 7bc1bfd..d4f3484 100644 --- a/examples/model_zoo/common.py +++ b/examples/model_zoo/common.py @@ -6,6 +6,7 @@ import colorsys, random, cv2 import numpy as np from tensorlayer.visualize import save_image + def decode_tf(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCALE=[1, 1, 1]): batch_size = tf.shape(conv_output)[0] conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS)) @@ -284,4 +285,4 @@ def draw_boxes_and_labels_to_image_with_json(image, json_result, class_list, sav if save_name is not None: save_image(image, save_name) - return image \ No newline at end of file + return image diff --git a/examples/model_zoo/pretrained_resnet50.py b/examples/model_zoo/pretrained_resnet50.py index cac33eb..9c97618 100644 --- a/examples/model_zoo/pretrained_resnet50.py +++ b/examples/model_zoo/pretrained_resnet50.py @@ -14,7 +14,7 @@ from examples.model_zoo.resnet import ResNet50 tl.logging.set_verbosity(tl.logging.DEBUG) # get the whole model -resnet = ResNet50(pretrained=False) +resnet = ResNet50(pretrained=True) resnet.set_eval() img1 = tl.vis.read_image('data/tiger.jpeg') diff --git a/examples/model_zoo/pretrained_yolov4.py b/examples/model_zoo/pretrained_yolov4.py index c8d3908..9c5f6df 100644 --- a/examples/model_zoo/pretrained_yolov4.py +++ 
b/examples/model_zoo/pretrained_yolov4.py @@ -1,3 +1,6 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + import numpy as np import cv2 from PIL import Image @@ -25,4 +28,4 @@ json_result = result_to_json(image, pred_bbox) image = draw_boxes_and_labels_to_image_with_json(image, json_result, class_names) image = Image.fromarray(image.astype(np.uint8)) -image.show() \ No newline at end of file +image.show() diff --git a/examples/model_zoo/resnet.py b/examples/model_zoo/resnet.py index c57bef9..75dfbc5 100644 --- a/examples/model_zoo/resnet.py +++ b/examples/model_zoo/resnet.py @@ -28,6 +28,7 @@ in_channels_conv = [64, 256, 512, 1024] in_channels_identity = [256, 512, 1024, 2048] henorm = tl.initializers.he_normal() + class identity_block(Module): """The identity block where there is no conv layer at shortcut. @@ -49,10 +50,11 @@ class identity_block(Module): Output tensor of this block. """ + def __init__(self, kernel_size, n_filters, stage, block): super(identity_block, self).__init__() filters1, filters2, filters3 = n_filters - _in_channels = in_channels_identity[stage-2] + _in_channels = in_channels_identity[stage - 2] conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' @@ -60,7 +62,9 @@ class identity_block(Module): self.bn1 = BatchNorm(name=bn_name_base + '2a', act='relu', num_features=filters1) ks = (kernel_size, kernel_size) - self.conv2 = Conv2d(filters2, ks, padding='SAME', W_init=henorm, name=conv_name_base + '2b', in_channels=filters1) + self.conv2 = Conv2d( + filters2, ks, padding='SAME', W_init=henorm, name=conv_name_base + '2b', in_channels=filters1 + ) self.bn2 = BatchNorm(name=bn_name_base + '2b', act='relu', num_features=filters2) self.conv3 = Conv2d(filters3, (1, 1), W_init=henorm, name=conv_name_base + '2c', in_channels=filters2) @@ -80,23 +84,30 @@ class identity_block(Module): class conv_block(Module): + def __init__(self, kernel_size, n_filters, stage, block, strides=(2, 2)): 
super(conv_block, self).__init__() filters1, filters2, filters3 = n_filters - _in_channels = in_channels_conv[stage-2] + _in_channels = in_channels_conv[stage - 2] conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' - self.conv1 = Conv2d(filters1, (1, 1), strides=strides, W_init=henorm, name=conv_name_base + '2a', in_channels=_in_channels) + self.conv1 = Conv2d( + filters1, (1, 1), strides=strides, W_init=henorm, name=conv_name_base + '2a', in_channels=_in_channels + ) self.bn1 = BatchNorm(name=bn_name_base + '2a', act='relu', num_features=filters1) ks = (kernel_size, kernel_size) - self.conv2 = Conv2d(filters2, ks, padding='SAME', W_init=henorm, name=conv_name_base + '2b', in_channels=filters1) + self.conv2 = Conv2d( + filters2, ks, padding='SAME', W_init=henorm, name=conv_name_base + '2b', in_channels=filters1 + ) self.bn2 = BatchNorm(name=bn_name_base + '2b', act='relu', num_features=filters2) self.conv3 = Conv2d(filters3, (1, 1), W_init=henorm, name=conv_name_base + '2c', in_channels=filters2) self.bn3 = BatchNorm(name=bn_name_base + '2c', num_features=filters3) - self.shortcut_conv = Conv2d(filters3, (1, 1), strides=strides, W_init=henorm, name=conv_name_base + '1', in_channels=_in_channels) + self.shortcut_conv = Conv2d( + filters3, (1, 1), strides=strides, W_init=henorm, name=conv_name_base + '1', in_channels=_in_channels + ) self.shortcut_bn = BatchNorm(name=bn_name_base + '1', num_features=filters3) self.add = Elementwise(tl.add, act='relu') @@ -117,6 +128,7 @@ class conv_block(Module): class ResNet50_model(Module): + def __init__(self, end_with='fc1000', n_classes=1000): super(ResNet50_model, self).__init__() self.end_with = end_with @@ -141,7 +153,9 @@ class ResNet50_model(Module): block = block_name[1] if block == 'a': strides = (1, 1) if stage == 2 else (2, 2) - layer_list.append(conv_block(3, block_filters[stage - 2], stage=stage, block=block, strides=strides)) + layer_list.append( + 
conv_block(3, block_filters[stage - 2], stage=stage, block=block, strides=strides) + ) else: layer_list.append(identity_block(3, block_filters[stage - 2], stage=stage, block=block)) elif block_name == 'avg_pool': @@ -155,7 +169,8 @@ class ResNet50_model(Module): def ResNet50(pretrained=False, end_with='fc1000', n_classes=1000): - """Pre-trained MobileNetV1 model (static mode). Input shape [?, 224, 224, 3]. + """Pre-trained ResNet50 model. Input shape [?, 224, 224, 3]. + To use pretrained model, input should be in BGR format and subtracted from ImageNet mean [103.939, 116.779, 123.68]. Parameters @@ -175,14 +190,14 @@ def ResNet50(pretrained=False, end_with='fc1000', n_classes=1000): Classify ImageNet classes, see `tutorial_models_resnet50.py` TODO Modify the usage example according to the model storage location >>> # get the whole model with pretrained weights - >>> resnet = tl.models.ResNet50(pretrained=True) + >>> resnet = ResNet50(pretrained=True) >>> # use for inferencing - >>> output = resnet(img1, is_train=False) - >>> prob = tf.nn.softmax(output)[0].numpy() + >>> output = resnet(img1) + >>> prob = tl.ops.softmax(output)[0].numpy() Extract the features before fc layer - >>> resnet = tl.models.ResNet50(pretrained=True, end_with='5c') - >>> output = resnet(img1, is_train=False) + >>> resnet = ResNet50(pretrained=True, end_with='5c') + >>> output = resnet(img1) Returns ------- @@ -212,14 +227,15 @@ def restore_params(network, path='models'): f = h5py.File(os.path.join(path, 'resnet50_weights_tf_dim_ordering_tf_kernels.h5'), 'r') - for layer in network.all_layers: - if len(layer.all_weights) == 0: - continue - w_names = list(f[layer.name]) - params = [f[layer.name][n][:] for n in w_names] - # if 'bn' in layer.name: - # params = [x.reshape(1, 1, 1, -1) for x in params] - assign_weights(params, layer) - del params + # TODO Update parameter loading + # for layer in network.all_layers: + # if len(layer.all_weights) == 0: + # continue + # w_names = list(f[layer.name]) 
+ # params = [f[layer.name][n][:] for n in w_names] + # # if 'bn' in layer.name: + # # params = [x.reshape(1, 1, 1, -1) for x in params] + # assign_weights(params, layer) + # del params f.close() diff --git a/examples/model_zoo/vgg.py b/examples/model_zoo/vgg.py index 779635d..db11b40 100644 --- a/examples/model_zoo/vgg.py +++ b/examples/model_zoo/vgg.py @@ -151,6 +151,7 @@ def make_layers(config, batch_norm=False, end_with='outputs'): break return SequentialLayer(layer_list) + def restore_model(model, layer_type): logging.info("Restore pre-trained weights") # download weights @@ -177,6 +178,7 @@ def restore_model(model, layer_type): assign_weights(weights, model) del weights + def vgg16(pretrained=False, end_with='outputs', mode='dynamic', name=None): """Pre-trained VGG16 model. @@ -196,38 +198,17 @@ def vgg16(pretrained=False, end_with='outputs', mode='dynamic', name=None): Classify ImageNet classes with VGG16, see `tutorial_models_vgg.py `__ With TensorLayer TODO Modify the usage example according to the model storage location + >>> # get the whole model, without pre-trained VGG parameters - >>> vgg = tl.models.vgg16() + >>> vgg = vgg16() >>> # get the whole model, restore pre-trained VGG parameters - >>> vgg = tl.models.vgg16(pretrained=True) + >>> vgg = vgg16(pretrained=True) >>> # use for inferencing - >>> output = vgg(img, is_train=False) - >>> probs = tf.nn.softmax(output)[0].numpy() - - Extract features with VGG16 and Train a classifier with 100 classes - - >>> # get VGG without the last layer - >>> cnn = tl.models.vgg16(end_with='fc2_relu', mode='static').as_layer() - >>> # add one more layer and build a new model - >>> ni = Input([None, 224, 224, 3], name="inputs") - >>> nn = cnn(ni) - >>> nn = tl.layers.Dense(n_units=100, name='out')(nn) - >>> model = tl.models.Model(inputs=ni, outputs=nn) - >>> # train your own classifier (only update the last layer) - >>> train_params = model.get_layer('out').trainable_weights - - Reuse model - - >>> # in dynamic 
model, we can directly use the same model - >>> # in static model - >>> vgg_layer = tl.models.vgg16().as_layer() - >>> ni_1 = tl.layers.Input([None, 224, 244, 3]) - >>> ni_2 = tl.layers.Input([None, 224, 244, 3]) - >>> a_1 = vgg_layer(ni_1) - >>> a_2 = vgg_layer(ni_2) - >>> M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2]) + >>> output = vgg(img) + >>> probs = tl.ops.softmax(output)[0].numpy() """ + if mode == 'dynamic': model = VGG(layer_type='vgg16', batch_norm=False, end_with=end_with, name=name) elif mode == 'static': @@ -259,35 +240,12 @@ def vgg19(pretrained=False, end_with='outputs', mode='dynamic', name=None): With TensorLayer >>> # get the whole model, without pre-trained VGG parameters - >>> vgg = tl.models.vgg19() + >>> vgg = vgg19() >>> # get the whole model, restore pre-trained VGG parameters - >>> vgg = tl.models.vgg19(pretrained=True) + >>> vgg = vgg19(pretrained=True) >>> # use for inferencing - >>> output = vgg(img, is_train=False) - >>> probs = tf.nn.softmax(output)[0].numpy() - - Extract features with VGG19 and Train a classifier with 100 classes - - >>> # get VGG without the last layer - >>> cnn = tl.models.vgg19(end_with='fc2_relu', mode='static').as_layer() - >>> # add one more layer and build a new model - >>> ni = Input([None, 224, 224, 3], name="inputs") - >>> nn = cnn(ni) - >>> nn = tl.layers.Dense(n_units=100, name='out')(nn) - >>> model = tl.models.Model(inputs=ni, outputs=nn) - >>> # train your own classifier (only update the last layer) - >>> train_params = model.get_layer('out').trainable_weights - - Reuse model - - >>> # in dynamic model, we can directly use the same model - >>> # in static model - >>> vgg_layer = tl.models.vgg19().as_layer() - >>> ni_1 = tl.layers.Input([None, 224, 244, 3]) - >>> ni_2 = tl.layers.Input([None, 224, 244, 3]) - >>> a_1 = vgg_layer(ni_1) - >>> a_2 = vgg_layer(ni_2) - >>> M = Model(inputs=[ni_1, ni_2], outputs=[a_1, a_2]) + >>> output = vgg(img) + >>> probs = tl.ops.softmax(output)[0].numpy() """ if 
mode == 'dynamic': diff --git a/examples/model_zoo/yolo.py b/examples/model_zoo/yolo.py index d3784b2..3d32f5b 100644 --- a/examples/model_zoo/yolo.py +++ b/examples/model_zoo/yolo.py @@ -15,6 +15,8 @@ from tensorlayer.layers import Conv2d, MaxPool2d, BatchNorm2d, ZeroPad2d, UpSamp from tensorlayer.layers import Module, SequentialLayer from tensorlayer import logging +__all__ = ['YOLOv4'] + INPUT_SIZE = 416 weights_url = {'link': 'https://pan.baidu.com/s/1MC1dmEwpxsdgHO1MZ8fYRQ', 'password': 'idsz'} @@ -24,7 +26,8 @@ class Convolutional(Module): Create Convolution layer Because it is only a stack of reference layers, there is no build, so self._built=True """ - def __init__(self, filters_shape, downsample=False, activate=True, bn=True, activate_type='leaky',name=None): + + def __init__(self, filters_shape, downsample=False, activate=True, bn=True, activate_type='leaky', name=None): super(Convolutional, self).__init__() self.act = activate self.act_type = activate_type @@ -44,11 +47,13 @@ class Convolutional(Module): b_init = tl.initializers.constant(value=0.0) self.zeropad = ZeroPad2d(((1, 0), (1, 0))) - self.conv = Conv2d(n_filter=filters_shape[-1], in_channels=filters_shape[2], filter_size=(filters_shape[0], filters_shape[1]), - strides=(strides, strides),padding=padding, b_init=b_init, name=name) + self.conv = Conv2d( + n_filter=filters_shape[-1], in_channels=filters_shape[2], filter_size=(filters_shape[0], filters_shape[1]), + strides=(strides, strides), padding=padding, b_init=b_init, name=name + ) if bn: - if activate == True: + if activate ==True: if activate_type == 'leaky': self.batchnorm2d = BatchNorm2d(act='leaky_relu0.1', num_features=filters_shape[-1]) elif activate_type == 'mish': @@ -66,7 +71,9 @@ class Convolutional(Module): output = self.batchnorm2d(output) return output + class residual_block(Module): + def __init__(self, input_channel, filter_num1, filter_num2, activate_type='leaky'): super(residual_block, self).__init__() self.conv1 = 
Convolutional(filters_shape=(1, 1, input_channel, filter_num1), activate_type=activate_type) @@ -79,13 +86,16 @@ class residual_block(Module): output = self.add([inputs, output]) return output + def residual_block_num(num, input_channel, filter_num1, filter_num2, activate_type='leaky'): residual_list = [] for i in range(num): residual_list.append(residual_block(input_channel, filter_num1, filter_num2, activate_type=activate_type)) return SequentialLayer(residual_list) + class cspdarknet53(Module): + def __init__(self): super(cspdarknet53, self).__init__() self._built = True @@ -124,7 +134,6 @@ class cspdarknet53(Module): self.conv5_5 = Convolutional((1, 1, 1024, 512), activate_type='mish') self.residual_5 = residual_block_num(4, 512, 512, 512, activate_type="mish") - self.conv6_1 = Convolutional((1, 1, 512, 512), activate_type='mish') self.conv6_2 = Convolutional((1, 1, 1024, 1024), activate_type='mish') self.conv6_3 = Convolutional((1, 1, 1024, 512)) @@ -206,6 +215,7 @@ class cspdarknet53(Module): class YOLOv4_model(Module): + def __init__(self, NUM_CLASS): super(YOLOv4_model, self).__init__() self.cspdarnnet = cspdarknet53() @@ -310,6 +320,7 @@ class YOLOv4_model(Module): return conv_sbbox, conv_mbbox, conv_lbbox + def YOLOv4(NUM_CLASS, pretrained=False): """Pre-trained YOLOv4 model. 
@@ -327,11 +338,11 @@ def YOLOv4(NUM_CLASS, pretrained=False): With TensorLayer >>> # get the whole model, without pre-trained YOLOv4 parameters - >>> yolov4 = tl.app.YOLOv4(NUM_CLASS=80, pretrained=False) + >>> yolov4 = YOLOv4(NUM_CLASS=80, pretrained=False) >>> # get the whole model, restore pre-trained YOLOv4 parameters - >>> yolov4 = tl.app.YOLOv4(NUM_CLASS=80, pretrained=True) + >>> yolov4 = YOLOv4(NUM_CLASS=80, pretrained=True) >>> # use for inferencing - >>> output = yolov4(img, is_train=False) + >>> output = yolov4(img) """ @@ -359,7 +370,11 @@ def restore_params(network, model_path='models.npz'): network.all_weights[i].assign(npz[line[i].strip()]) logging.info(" Loading weights %s in %s" % (network.all_weights[i].shape, network.all_weights[i].name)) -def tl2_weights_to_tl3_weights(weights_2_path='model/weights_2.txt', weights_3_path='model/weights_3.txt', txt_path='model/yolov4_weights_config.txt'): + +def tl2_weights_to_tl3_weights( + weights_2_path='model/weights_2.txt', weights_3_path='model/weights_3.txt', + txt_path='model/yolov4_weights_config.txt' +): weights_2_path = weights_2_path weights_3_path = weights_3_path txt_path = txt_path diff --git a/img/tensorlayer_v.png b/img/tensorlayer_v.png new file mode 100644 index 0000000000000000000000000000000000000000..c0a6f6c9000aaa1ef4ff36a8be7f87b016b7dffd GIT binary patch literal 27496 zcmd42dsx!>);Da^j7|4sPPu1xT56M}P3okXny64a6DQvn5m7n{BJeZSZ9{Bu9={an|}btyj(zOLUo zeAZf@@A~GP^#beshrxenX=&-WySbdx($e0krL`&fdu`1-T~Ba4&C{l&bKt*e5$Sv8 zH7~w}obWuMrB#ReVg1T?n%7%zxCJC>X>IS^_}N5@uezqC6{U1{IdLI1M8w%zXF+5B zcGkyA+sA6l$se`PTWz-aakGilH#>gZWNo#Dx*GFLEm9uA*TauhD#{!8!6p!`vwLb1 zs}>ZUV$JHB#}B3&fi#b-1A1G(Uc7pc^X=D*%RMgNe7$&aZQrJ^7Z;D@e|^@nC~^Dq zqR3Dk0cV=7fs#_CliD>PFXma7hJ>ZudR!E#lS zCDThTDMp8a=uuRnac0qTWvPFD(j04xR%j192{~I7d%MAg-;^VaXD!&QW7X?CY1#n2 z8Z4y@Z_gq4Q=FrNPQ`Z!n|??Nfwtme<<>j>$FYu)g;3aV)aaz+ALhJ1nzcsjuPDpI z-XNu_Qx_ET4y#(JuZcm%z`*RQ!#EFeW`#RNDpgb>;S1&ry5Mkwg-f! 
zFxSpNNTjlr8e#?aZaJbbZy|>20_X?Jza$}p0RvDp*=_7VJaOAYtyhk7j5gIJth=Hz zZuwEi`V8s-ZD|{6ocSU4Ofd3xFMc;>h%sDZOJFz6>)v#S&`K-`YF^T}Q zqiAM;MnJ?Fxg@C<2oW3@`qg;|1uAoUSsG!IsDv0^)k-x}2C%R_Hk24W@L^{mFf<-{ zn=!UIVPy8$AKP9rOpizKQlYD%I$a30X5*r5fsRgMs_;(M@GBW zB2jJ=SLvM8XNUjmPSLu4cE>X%j8W$wxNfPQ8}YmflOIn8gwLnvx`7X3nNU|k#?C7KxHMC|{hTqf5tOZ( zN+)+rCXnjAf{W(#w$8thK>f0{os~PMF8K`(*QQ)htQqTET$zHXgMoO#{s&%?@&~u^ z>0Wbhzghfgg*QHE1A7@>Hd$2c9G23~Xvg(WmkU{qV53kpmx} zx&Cyz$}{EYD=s4vX4Gjiw&8Qq;ixy)?S36%^=SNk;Pdswe=x!{NR<^|nF91jPqBB) zky`D7`0{6)KX6UY@B|)!F|t>nl)}GNxf#?{J@kt}QO&Y;^ixs!ZwCNke@R}Hls9L8K40`N&BC`NFrSn_pGO#0 z)TE3JU0Fc4zGbue6d60`U!ZkjAZg+XEVLV9IExdM*Jk>9j|<7V$0~-MhfyHGM@(?i zqvSGA(>>6IO&<+cAmcTh^T8Zb#$bzTSHr^QxyZiq<|U0XAIV#&1Np}g?F?-d!l1uB z`~`r)zD2w1J4UA?nA_7YD3(tLhKD)|q)=F|G3hJdbG-moj)tWHTYg2?n;@;=giMKG zhP<6ulLViwItdhfE6ce{tPK5JGwEZP_MU{0ivGx052%#PmHJ1ZdFV~LTB+emc2kt> zfCUV<1-ZpZ|M4nW`-GrzTOwZxA>tOyZifp4eOQF-ij!90;HyNtPj ziB6rvUULFi>*h%gK{U#p6Bp=^VHdC-CFo_L9FBk##%aXrDr_kA+&MtRX)LJvw*_ey zhQ#Ctt`);f?#I*Mgf>p&kmK#fPJ2Odt{2Q+QS{2|PRfCCsd6SgLhpcM_N*o@Y?bhS zDSJ39`4D-5P1&izw0pm40&6Fk9-AV7X)V^slWHl5h(U^j!8)3n5QQ!vkA}*y${`O>OJ6r)WtZoVUEVPeB^-Jv$%c_{C*It%pkhcp6 zIKayCKnFf!z1=B-RUU>ZQI<35e@ZASD%N?gtNiZnWP1s9uLjkMojYwVVi@55G3y+* z4I<@6Ti@eVHq4PVYNqp4S0!5+0P>gK_Tt8{)@ShLL7oa=+L5b;&&g2K)yjbZp4HcU zkp$iAl)qgtn3ZUpg`vi>9CLHnWkK_IC6WdGevF7j<6G93;OFfH7d41?l8%GH70r?< zhBPzUfKC@+8n$FHMzPTp7vnji{BcI+&CAlf5zL3APi~1_?6k1R(2zE zd9Du(h(OEJp7Qnnf?vtJrm%^KY}9k;>Wxw2-Y>v||I`<8`z_W*=sAR0?C1#$RRkR8 z)@FY+Co2brKF;Yh@}uMf#Dr0wTDb!&4*|*$kI-r(k2xMT4Ahzo$Ru)h;%^Gs$K^5i z;nJkv&B>5epXrG_43iAegn$*99IWm-VR%8TryZsUXkkXriF%o9zb@DMOe?X~f{KE8?D&XY2w7gK3`-Dv~b&M;7Y#L%a4G$mo z879&!s1b(DL*pY2HJi6P$0z>jWsZ@@1TowKsn#M}o)kN}Fh58M@Z4XnBn(<9wmxPq zLt*QMTwZ?%5N%+1fM{@x)9Lhzq<64(6Qb_+0|6n~fz>&ls z-&5G}P%q@sM$y%d2L9O2_n678VqZpxr~9?B5H!w3yvtNx<{v;J6)@s#$3p?Xlj&Ch zr4Yk6n2a2J1Z_c{hO(cO^T-VW!OJBqc{1)O^I4Ul)D|bF7>4$-#d4yqn=Eag3?_qq-)$c&b)=b@+s#;n3K8zqKL*4n~Y# z3Guwt4_NRY0MwW(*AepYG)mIINg!6|_Oy*wYKn5L$>u(yX+U`Dk8S0=HN;<(X^x6C 
z9KoOE-jDf2{_8Ih*-Ek4h$Ldw&-WXZvO*1ZO6h#q+c*3&p97qc# zB1S;q0c1pC9FoIMTY)C_mrBXq%}ektGQ|Cy2N_7M&%G{x>nC1u87TO4U5WlMe}D5v4gON)fpHjruKSUp-3EzZ zs8u@M9N3xuQBk&v92SzpiFPc#dr@Q}>SD`HB1HL+CJ-djHX8CShEs8Z#bW0$t^bvw z;`G1wUdf9TThRgD=HwF!_ITX2cW=5Iz*}}Rwec%BM5fOQ zOH=mI7Cpb!YJZ?22pH2PxY!8LRMHIF0fAvX`kB}e6cNvsJ=z-)o5y*1D^u2 zU;h%pt=VN=wl_6GkyPXdjw+;3Duy$Kts z^S;Bc!j*I{1%RQWzlgaRyOz>I?R|nhJKnNg{svG&BH}+Dh+sJh4jP{Vym7BoiGJ8{ zwh~syCad8s3PDmE#Tu8uO(lpfkzeCWt_b$&)1+5OfW%jplalXg}L3yTKF z-u`sG%xPFq&w;Go=f!b=KG=6k{dV}ME*X-M*(7Tkwn#K|j7+Mah<1Vt_U0#vc#i}D z=rh#Ob!w=fBqNWC_Gg4c)XE8kL|WgV=TFK(UHLzu4gRn8&l_CD|CYpf^#-K+g5s)G zvJSOlhC)OE0XNtet3RPX)poO#+ zk;QHwGuPz;lA5apH{Al88Bn%}xGE_HRMhs@f08olEBkJ$z1vT{-!hnzF?kRaHbUup z`_{4dTQ_Q+$NfudIc6J6Ws%;ve51w!?_xfzb!=B{h{~`N%gL_U>KOfv54>{uMvsiO z@7@2pgtSClg*o3omos*UKls=kv-D{WlDnkV+29$}$SF{~8mruP zE+eoi?tI6A`|%#rWD6`3v3Gp7IBeXdDF8YA*f^S}1{WKeC_Zpt(AWSw;^F*;UP!tw zHDv35Qa}mum%{hmtj2@lM_8dsf{%6|M7^c+?z$Nbe^Zto!WwuC*B;UE4j?sy$EMw% zaiFMTa}1!$Ula?1+H3DS)P(b>J(=a=e&N`@Od=gg*t~IPiyo#si(BN&uE@m8QnD)~ zTV%U5iezmWPr3)pxnHHXnLIEYhEK**?Nb$H?952{Y{FuLSCNSes7-kxXm%Fa(!jk zTdlHL3J5l%?V_C-nW1>>Us0MJsk;FS&dF%QMfch3)svX^fCs9t3p1|nyZMaG*gcSO zx>EhEcW9+<7UTue?w5S`toD#bgP0AwtlU(w;cF#N*vHq|E^mZEyYN`cQco?R*MpU$ zKK1!Q*1X~?o0qy@Z|fd@{D>@mw{V@{qh7-@#d3z!H?s+1_aWoxU0_-9MNdF7L~pAy zXjhKPGMjH<2w^qJpP%6RoBrwFn0GGU+@WVoR;fYXBD0+0=U6j2+f*p5YJQMao4rha zEycZ7FxB~185wie$JxynkPn`Qrr4jM@p~I3@qruorFmd9+PV_~iod{pDBjFhj%$-Q zEt&gl876IO(k-!?8z1T3yDo$j(M@*}x6fil;@-x8l8!(PM0iQCLp|#6ek8~5UOBPG z)>b`A0hWY3~;Wz}L86Htf0~-79~Mlb*+8?fKbDN=WYQ7O$%k?DyE_lzmsa zr(N592W7r!&;9*HjEsGebKJXg+3G(D(5#K`S(0xquJ1pqjIq5xxkUs49k>ww7*wEx zMC0|guDNzbt;K{gHQ7RK=emzof<^EnxPPB3_|=~{@F(HAnD4RwMYwd>$u=)z%xtrq z9D76yvq`Z-{|2dG`x>F;$OogarzX52>~A%+c|DT=N-rFm_EezvYfgHR5~8RG0MRd2 zdLR3zocxBe5EUg0%?)5O$3#@{c4wFP7pdXsAX zFN4k?wH~=A%fJCu&Je0?HDPymJ##CB`?CA8da$|lPY?JiS@mDxbjgofpnpln09yWC!yVG7&1gN5s>f>5=b=BGBp zq5vvpMaZ)ZCgSqYM-3@V>%jV=iW1T5t5s9TTVwfqSV@e=X`_p>M=Pa0@J#tIRu%kP 
z5+nVmU%guyZ+%pJ$!y?cvr~Es6}Xc6Zk>8Af-!ACYmeA`hFd+b>ekGyUHAzsdIm0} zjrm{X6hP{^bsTd}CHB`yOdB7GPlpwSb_TRsD#l9g;@3cUlzEz6t|*Z%-XD8EVS)Rs zA6fLU)Bh#5%uQ=mJId_ysM}N)ttvMQJ*V@JQY@BzH}&~Z`T5x>z@^ChVmF*}Ba>po zR@^G54YeP^135cUUR-fZQ9F2$xxRjAXcGt@uU|1aUnM?$2L>zOfdWo~OPI1C>>ytH z+GF7kM38WX^p(K@owZrH1Y<4ekv?on@#?E;Ub`5ict3fTQP z<3bu?4K|~i6wikSEU+BY3f&Coe45pG#Lo_sKCPcdTY@RA54c#%NNYOcE&Z(yrMWTP zd5qOg)>@3mZFc@c;|!Ygx%ZimW02XG5?ws$x!pHId(qEQawiv+eDs^PpB~m9BPP7{ z+48Z*W8X^Hy_E%QWnP>7+MB_$3leyuB^kQYK!rNr$FQi4zGn5bj~AvO?UmF%cSnx) zYi}~Leu+)4RqT~P25hY>CH=vTHrBFlxfShLi_ zwz>+NI6p2#Hi3fqMUbjkplCCs#BR!DVBt=qhlW#9ahU5bE1gEu`O|oUsihVrFij5pQBEn%*$Qzns_?P_P1IHn7=_c-FlAO zB{4lG7W;_ns%g{-S-ev?+8=8{;5W0p)|rY6#I%?oS;p~u^|M=VCd zjQ7MhJ-1j4p<@!N#mKBxvY9TUSNIczg=H8)2_(X!{B?w#L)p!_mN;3}(#2&<#e^^A zOuDV|)Z=yM8Mtje#t!T$4#2#!84`I;}8zMg8_kr$NfUwXMxtm=fYwXl=h7 z%1PKM(YNKc$$}QLeKq&B&CvZY6RIAiDoq+(ksUHD8bt(&K14j8bqlsP{FpK*NH)CR zl87~sQP%atjB`>FS3CxsRl%e`oLd<$x9uIRzhT$B<^c|u){q4Z2nsOj?>i2csUUTz zLnvErJtG*}`quxPnu}Zct5Lw6lwo1alImYVM|$kpfB0#8mTlR!PZ^XV5_q`uo7LI4 z*s6#Z`*d1j3c8QG8vF>!ZHciTH9nH{Fm^eB;}7KKM_2J4baz5ijvvT<6GXuVd($D8 z&KR6SzK~D49v>j1MFnO7@#VwENaXdJwS@ii_EJQVIt;69D_{kTQWz~$fk_-$P4;Q> z_T!I|4Fufr=5-57#g(NeqdhLf%u26&vHTWl-mJKlHzd~0K`%L8LkIP$-;>OE1h6{i-wwiYad}9iYQ=>6IPv*7yUs6>7-sDL$i#sL@8>}1kTdeU zBroUFEvQVMJY^)-H4v3)7V#j&B-20qIqT8ON)>qNK$?RT?MB_bR=6x?9hp7`VKtg4 z1`-6Sr1f&zvC8sLXOdx*7?}no_`otCn*DxpP?tk zY&MLNeorB=WV}wm64uIxv3*gMDEfU&0c8 zcG<^ZgjA1tCc7Aa6u=l+AAoYrXJUniBa=*XDRn9@W$G#aA*=8RzuVE}%$&NA-Zue; z&gNUdvO7D>Wku>MghzrNC}SbhIap91G;-82k>MOe`E3MVcI=$^J4bW}(a4OK@&`vT zdUja6LspEk!VgOTu%Y(R+&a~2%2iXEL7%@&-m*R^HwK40VJpk0FHVu!Y0TI}?~Zpb z^Vap*jKMU~Ln?d{-~Hu5p1GvWZ=N%}U%)|NGY?2g8T=d6u9mo={qqgI!#kR3hBIKN zr~TW(;n$`I*%I-{csKI0Uq;luL}}p&;#1uCO1DXG*hqRGFoh}F8G0%fRVf6G#g(F; ziQ_m6e3<`|f;_U`?HpevdL>`$br!8psH4hAw813d4#sOA7C6IDB*tM48B*wih1SIM z>65m)FYDs=>ArJ%yEF1G>khZZI>b0jd9!SarR}y)amhKAw%IBFj`G+eVT#a9jj7go zyn`Kjb@br8&ia!O@A&DwWkGf_w2ZlCSe@2128KanQ@LO^ z-+pcxk#he9MsP@%__9=LTZSqHG1WSB5sfx$KAXWcaO#3 
zQzM%?%FhI6%GS>3hRrhMG5wNhGJN~gGVaI(BT{HKIDAkIjNUTCXknByj#aAG7i3F8 z3(KTy6(^o@c{PCxdSTyKCMux;Lr1U&9sq5<6Wa>Gh3q-r!=&yExDgX>*Q!Fp%NnX! z#mMJQ_XH$b_ZR<}FNRD(##@_@J7%n-+6@bviH}~Bo<0}U?BRyl4=kx9OW#Kh0jy}!4Ac)&HUsQ$ViK%h#OfcCvj|gQTknE`n z7MJCuHw|g@6)G#M;=!J6lqk()ts4cL+fe|Wsn_KoNUq4KO0YSu$t(6?Zg*!Z>@vU zr?gu3cP0QFl`(xO-Us_bj-hR>fA~jcKR>zm(`t;}OP>RoT`dv^XW*Iskwi*RXk4w@ zr!S;8D)?|FNuh7;NP76ko=W1Qqn8TTN=j!<u_0&-67bRWH4o)%sP4|cCC zjgx$kP4Gh9J+pC6wX8{jThRg063@bODZmnihYaC*V`>>WG5+Cc)J#-ya3*kfAB6UM z>1{Gp-poxzWu9_ILm10=BOfE&?0AieF|rY_)aSTlsxL>KU)n(paUEd!BFn13Rvc%? z1IdWti$%p8?>mUGHA#pgqG+ZWFgic;?7765@jCEl*A?rmN$-$C4%pcd*!yog8m7+a z_S7sK*5wg_ZF=PG$s=3D!c8GEo3af%|T z))dM1EBh2>8ngM{xp9COAhrdwlK0O$Nb3L$B5*v;c>x^6oR_Z+CM8rry>WH1SD!x@ zAFAa87pm2bMi34&gAdPPraki*Jz}?y_j1{yDzZ#T=uP#+xpaiC0DPB8eb)RIa@gqd z={+-up&NvhhtGXvh(=&$gDWhW3q6AAw1LWWKT2@n9XH@TCSSSVzSSUa5 z-}lDHjNiYoOsJ7f1%$Lc^+glc3gK2s7cyzQoRZRZe1k#X`Ry9>`12D)6~;4HdSi`K z4f7LIRy?XAGhoJU!3F|p5=oL!2=?{76A)C1VFd(L87VA085-2Acz%LtzIJRMYj8PT zHu)Z1s0J6Y<(D)gEk5D_@{#nJHb$N=0|6tXoF6v*^1Y^PluQsGvLTm-A>-8N55+B$ z7WJf(cCXy&umRRve)~nnl}5S;XJHgE0(OWNAE3gB&n9`q21Qgzg;%f-LnH(urF$lv zQ4RWCaPms3D}u*v??Qid#1;=|ueab_-D6!OD>ab?64#6YgQCX#h9s|(mS-R{ z;b0EeGW(7Ax|vS2s{C1!WhR@ryN|x`6GUev@HZQ)6uVc}PW9`wi%pI`yTZ!Q#fIeB z0NEVkB9$RsF^DXHMrBCyypXQK^7FoZ=dV&i=p#tctp8AEj7{Gk{tY$5^LMd@YCt#V z5sIN_V~g~UwB5*O8WZCc{>xUZU;#3>oYn7x7V_6tc(3@J}W(Z_MkwG#@k`l59$qYJm{PS@s5ckq2kVzS#%p zlYcL-rp@UTnX_z0Vu$Yb!tlkm2bb19w1=Lg$6K5cR`G_p*9nR>@5+TXjVF+rZTTHxZvXe}xABJ+5K z%Qb?K9CF)wj3cXXsXZzFbV;{XnlxZb3fzg@KkCGFSHP9zBhY8W;2d+P$d}pR?(_!N zJH<@2a+)d)IYE6sGHg!gyNxIr`(g{74r6TJ2W@Z>#8es(V zFI6lE`Uf+*>e0%tJ)mGh(*yQ+oouu}O>&7BGBxajwY|{k@$j+v-1{y~Xj>^Cm}2}2 z+Mz5bU1HpbH+eh@=C<>gd2k^4$l2Z5ww}W~dLdzd|3#;_F2tF{EYS-YSVCVCTXg zk1_x+FKt1CD~Xdo)G&GAA$`L5fOZq8d2!9x&FzDL}i87q5)$<)9G?_%m?o)g~RTEA7I3Tnn-yPF5qEXc+GzAIUUwXB)GUE^HS^@+x)Gn8J&W z)_nynB%3g5K(J9COqvwE25(rl`R?NxyzVr+(^7sCag;#6I=Wb+`-+fsO|}Vr+ta`T zdN#@>K4uiT0L&7oAo{ zrJ=3!h>8#aeVk$I>@S&OiYl%ISEi(+u0{xX7Swv>MH#{98t35v;KQRi^xYQ5+dT3T 
zs3^EcUFWGGDnKnQLJQ9Ax{Rx^Pz-ciG}V}UwJOqfp;jV#!Rg7;Y^8Ld!X~1HHg#l1 zDF%N;8AYc%prv(fv%}py-^z@bf?{?xIczNe=`hZ^Saj@s-%K2OPEN(K3O4M4lqB!M znnk3sm`IMJrU#+Qfa=qHCB)}OjDt1JkIA!)dr55u5ZS%y zJ|;|MsTba8F`?K)PylR3%a}cNF%S^xoYw;WfL0sTX%B64r1ZugXedk*1N_%cm(q^w zvLT$`bRGbTllwNOYJ8KZ34lI_3$~z$aZP|5LU;w63hUCRGYxJ{kpTfV?a%J-ng?oc z6UQ!YM8MehlfU-a#x5w?g^c=w+b9oS9hSV}Y)-TJ^?U#Yc2wRpwfr`(lty0O)d%bC zvubkJP+;H`Si4P_lC9c@rf8N{SM@dO`3sa)y4C6_GWtaE^e3|Ej;Gt7iXYC@#eL4_ zeer-u8n>tB^`PYeDxgh~Spz~Asp1?P0((O3ef-!W+p8KrbIc3NmWfjDQ2B0ry2~C7mJ<-oyYb#&hUaq zAQ#2sXm(FQol^pey_^C@&PVxqhn}c4_Yb4!7N!qKNMmct3?viBhm0W6`ho%e^u;DJ zw&LAInN44hEIm+bF|;weF6zeU&1v&oc8Ofe36T|=c02`t`wde&B=)|HvhQ=RWSh+H zn<&gc6DgNOwm<%KJD=%(vt~Q}*$)5VI=flHail7wk8Zj73qHNgS5$m}F02^9kGppQ z?zb6So3NFVHTwpTnN>GVlQC~_Cey;P6%!SsX%7d;3xl#3RT9|9aDEcl1_={Fol}XNjn9Bwxl#98865dOIil{z0ihz8We8~p%d$RrC`OFt<>6a zR)gYE`a;<-F5CusBT~AJnizs=V{|+q%Wpg{dWS>sHO*c~CZjx9%}!$>I_Wmc_-aOn zi`s6;S5`!;X@bCHzJfJpS*L#i+-G>|H1?{Qch}NeXZg49<91h$b|U%)Y(^C4h~~W1 zpCv&1v3src$`5vH{>;Q15{#sqciGY((gjxrezqBNl(mqGv=*g#Mk7p|!7)whY{HuB z&c#8?0WZ+U1Xkrbr!}zgB+h1>S5gWgi=s__%>>ntf2_^c*H7O)Kh)>p$=%ZmdmpEN zmlXaHljzT^FJBItk6b2dQswOMAo4P8A%ougrG%Ma6s)2xna>~j>Z@PNlacLL0Y`0v z;U(4cWnn0JgUyej6z3N11g>a?oF`zO;Q0IW(Q3R{4MbMj1+y+dP6-;|xKWu1){N0f zy-|i2Ue1KmI%pmBji$JM@Ib@a_7GS&y2ZE?LjTX`(_%2h_>X3;A~QCDS7L&kS#`Qdk8c9{z2=n zg}IEiQ)@C%ba{{S+2|?DTw&0bEPe*v-~(89?&zL8gZj%9~P2Uv|I}P zmh`6=Uy6?{Ux3jgx`2rjCg7Q4Y4yDwRpZhrrw}EQ8&5OfArN_;xLM=bX!(tb zqDJQ4ki*do3vxcAYpHygKwu(1++|Bx%K%4l`M9vHqa~xm!x{5z?f&Z?9}uFH8)Sr^ ze)qxu^IS&$a;>cHQm6j~l68L5JD-qKoQOg*C{{zVF67`4u^nb@{4+YXIw(0e7@5ah zDihE<{L}o!XOwF!F%pmP8#)phwCqH~&m$R0i@nP!1QzK4ztL1fK=ip^g@M!uzglW+ zFuMhxdp9{|X|dKunRX14895TZMzFr%2$sACCaq){^({eS-%qFWvzUbQ#dp431djHk z&7|*aCc=epYn~*`z;CwxNJ`+LU8w^QR^?T^Ss11}eY zPyopIZ>f zbJ)jSh9WIUksrA78aSH|P5X)LRLv?BVCGp5VqI0dOXrOEOXP+$TU(t(PxD61e!qF! 
z5hx^)P?s(d>h~q(fnic#Qqsf^^}$bPuOPivkW6HS6AQUtPTiXcGEXsifWcWX_?Lj# zl0rtH*^;a@#i5eD{F`Qw$ceAX&QS)0p>+?-&F%#Wpg!!cHeu|$kRrC5K7sde>8$vQ z3Od^IQvw89ehPv@Zp4qw@-2-%vwtAa(v~}ds3D_k>o=s z#ZQZ}$wx}lYgm~-YqZ2xPFL-zJXu!&`nAAD(`tuN$%1OWD&B{Iobi#pcmn$ixMJ9G zc+0t=Pmc9ve|3(ai4}qKGQ96ZqOK~&Rws|2OsowN90J{GS?H&eAsbb$qhhRCclua$ z#}pSC+B&m0Ke4Ta%R0)~`<9)CW;UbkJoeEX;M&aPG09!Ux;eX67c74@WWh&@OTu9v z94f6-?NrVh`myC_fBtFWC-YwCL>LwCCQm^fL}_OZiPlq`D2x6`Be94O|*OrHsA;B92$?u!?q>K2d&~7H|^KN1hbErnaKKc6M=* z-wI*>5mxA$lMXaUc7#{j=}Z@5o;&7FwW#8fdq^$o*R@P}4ZZ4z;tCNxt)nFn1?#;& zWHEnk80R`<+UKt@5TH5UnuO(}oUm*~9aM)YIf}56sfo|RNqeWyMt+ipKqeOW;mB@} zYGU&9=zi_?Xtq_;p58t4!!m4yqI=t!6$6Gwen^`^%Z^T^l7p=Sy!C6g&;Q zAP}#nH)(bg*D=fjm)`E0fg>w`l7?DrZa1WL=8&M4-?d;F4l6Ny;0>9v$joCvSfP)M4b2l7<%aPtLuN9CIioPe27~V*1sF^(i99qW246NH>8S7&LVX!3Qp!h8lkmsTGZ zLoUb*>wwJEVQ7}&DHX3R6)zK=I}c`4D$*wEk)F69RM7BzjByzCZ3#xzs#&Bi-5;cC z-OVBwFmM*4hg>uiY(QL!c-V=aLW}Ub1q>o$k1Qs1$^a}hLfBrmTKe4E9>I4IRdB3` zkV*@}y#~$pc9fB3(rDvGuW}Qrq=)2Im-X;EXvn^^}im+{2yB8|Kq97nliJ09ycX1{{N?y zw&Kk7DdP_H%;C&6<;Eb6F;>g?lya(YePfvB>#56*zG=$2-I)dSn`7 z&!D{1e3t|Qcc5xxwvRqMacAx8P}1VQAs_e3nLEZAtMOXKCzVsojY-cA%}hw9Dn6}0 z>*rAJ#()S{tr;Ix+H-C4-wGathI%jEn1;NWey<)p(&96D&i!?ZZJYbzv63~z`O@1z zvkq{gO1#$6XLFCFjiSuNTB{54ET1=zccgEVp!|oonCZnQ8s2PiVoqJn)F?Xz0OR`LJ))SB@%N!Pz9o zCqD9HraR-vEhp&vbl8Ymz;$x!+b>!lgZDU_H0pn99sz|6MD6_L%Dzv6K%LWO+dMjq zG~(^?+_&Fj|GKf>R`0Vv>}EjreX?^WCFd<^^;5PdPTgBC>53Meoe_tvSuUM-g=R)- zv34iqapT9>+U;c{e+2OZn+9LrlqDa%Te9fsoNA;|LekNk8}ZnCEa=gLBD2vDlt$RZ zQ0Gz?EBb23y=2e{%NtJEoLl49iq@-sDb4e>?31ORc?`UcntWPqFOJ#Fb?0B}Rehbo z1+oiTN?UMATgAMXABXK%KBjHl1AG79dM!d3gO8KPR>x$dQyEB0NWu4A&s3SNE!f9D z127DiH_Ict`L&T{yO%$JKhM1`rL^-~77Z)BR@Cn!Wjfu-#{_%tBKEeS$R|8P|G2~R zQK#C13%=i4@O|j8X4wp{@y9FC`Jtg_mIT*#zV_}J(62`PJ#Z2BOpC`0KJxuTw<&04 z-F;D)o#c5T_8;zH*DPA)jS~b=ljrmVEHlcKmmrYh1^TQ9$@7XHo(XkX79=@(hA?Ew zD@y7CG&dQBDffgOP#~{496L=hlz?368FBeZyA&Uqcd)!T%)}qWRo?sd(+!>m*hP0g zS7{lWG3I>k4DVnY&bb>(rFs)!jW1Ui+xPnQ&iVuqPqUp%KLGfkrL`e%uW{$J)S0q( 
zjo1kj#Td0RAy=b**N$Up_tU9~9ZsdYda3C$BNeCEE%on#tMq$+3#C-&|9<1>S@g}( zx;RxvLx)fxoGl6qr?RCvxZBV?^zMX1i!tLa+Rst+C!yY>A|0i$qc{7qMQivANBdG- zH2sTw;2Vg0lf!JA}kEfTsGstMv@%>s{DsJbtv?D+S}>x+6n?QqX`C0WI5W6 z(;WWh$CsY`tMSW1C(2SkmicK1IM(tbXv4lS_pM{wY{>POrg|UmGL9d;!?iX)Y;w-% z+;6E)8}WphZ&ice4Xy@BOOQ4if883j$>~V+(TewGavomW#W1TkhzHi0a*?(gT<<-e z5Nxk7;Mo9!e0znP=-Xe`m^XPU(;M1!|Frjv=D4y3P~y>I z)m-9>vN%YhXILwHnnt`t`@rxDlVjFzrTB_>*uv30&X@fiCK;22q*KUF^s8C*~QybhKlD82hE^d+xUsU)BFTYUFl| zc0T-yX!9HL?hA&;jq+m?$-5N-MS$%CaJG|xuAROdVC-ScZ@dPQtghLNGqp4Gtjpe4 zv25rL3L{>@(K+4?#;fIB(_W?hL$d2uRN&dfJTSjeCdf#3_~1z?+|%(qQfluwON5K2 z((^{b%rZYZx>51aa2H@-{aeE80(tX!QWmX-QDXoW-5i;c_lpxoi?mc5U^(*F$|yp6 zu)%yN6BvTR#jd3{R}z-QRb1rJgXj+;a+1ci;QU<|QPZ;CK}}5FjbFvu4-dT~e{nnDgyZBT#0?K6@2; z_bF!{@NtN-EB>{#w24}i)ezu$?7{~ z_uC^VG<>_$WQ|Q}@qFxKqk!kY&J7egr+_>AAxmPCF65D!N10gDf3z}cTA#nh`z zWjWIFC0^lNmOrpmx4@R{_p{I9V{&d8Le~jKCUom^1@T;<|=+|CkT5$Cb*8WuC z{g;v3ee74~&~;@x+gaKJ3%PY6eS{}R&!dJJIL$;`*)GYG#e1(CThtlt&Fp*O^LP6R zraxeOx5I`l6M@a`1`b~F6Gq6O&mJ5`9^BG#{+mz@7*ioD-Srv9-vB2G8F^K)RZb+7 zw)JuwGvQ`*1T^xNoCd}t9|epYDS{$Ec-LNhPOa=L?R)rs6n^4Sgdq3jP$th#lb)-m z!kqcjXBVn9VBH>^n45FUtm?bO=XQ39!>7MH_WRg}xy^3S&}IjtYI{aIcFvx4^xC;6 zc?LF;RR@+~KzvLRL$XcMmO?4%p&Y13#sO@Vg2$5<9RAXzezl$s@_B2B09=%9820xo zLYoA^wZCy_7RnZRZQ&*RbbGx0K$ z*+5Y%2;E>ecyLRNpq;LLUoep58xUdt;R5k!Ohm4zMYZrazIO5JM$TErm6WnWy4koq z*IdoHQ+)x@G1%AzS+q>~}h0S#BdSEqa6gj~9C6-ToZ^QHKXJVI17+s%<}4S`Y7bTuagPxr~X_l#$)Z*gpIsq)>Pz`hLEk!AF=prL3BZv$VPUI7DfXJ$iQ2VNcbt)4ZqEw&B3&4R$>!Zu5+9HaczSv zfr{evTIcJ?Aplbo!zIW)`+Kww2oSgDQht(w!n5Dhg+8*i;F8$Kj97$9Uj2wa?Obdd zk;JXaEhE@39IzeHUA-h}Zq?Ja{*<6ezMi7P3%A^Fkv{QXHE43y0gcxOwAbzn^D*6F z)cF-1TOM?EUWD*0MTO>@yTO}3ucfF7HSrU&sgY=hs4Cf}I(F{bzqP~R@7~%xIPzMB zJ6bb;mb(uqZ|a2nKh1r2R8v{MHVpHEj(`ed0R$8jMiCJZkQQ`gC=Sw;CPH*5ks1L* zO$ZJS!blMiqzKWMP9RYuASD46=_FDkH6UOlp+g8HK<-YQd+$4I-9PSH_ug-P{6(@( zvVPfT?{oH3e$ROvd%kn=d9<{b&hAH)mG!&H^~11kw!`~~5@eIpqe%NjHD!C6H=<&2 zQPyc8tjS}*a)od$b+E3PQg5?PZJGL zU#l&v9Jipj4*T5c8W+WjSixd1BPJU5ZX|o@Tbra@dt=ZvQq#?0UGKd~sZBPE+UZJq 
zv!KlEx&b(Qcj|(!9W5F&=G=R9POmt6-eC-yR3JA93O`I(ZhvqCX59MpYH|;3uGOaU zE^*Rmg~;$pXI8X!tvu~?CY@4TTJVAyG7>>snyArvfsXHl;e@+QNBLOyl?;Y}|#0vwvP4YK-oU2F1Vox7B!>qowtE#?d)nTA6$6_1DF3cF!W}=kPCb>wVe5_3x z`+Q{RcWnNy7|Wyj-VYt2_H!9Jz3wz0`f3okccujuKr$VbH|PKOy;X1%n#i|&OWXX@ z0UaClMl0rq7!npsbRm`B-`w)&={G{oLOtfDN|m7U`%;GFNal8UWBRMhydV}O$TP~tgr)({~|O=Rjj zuy!zD?N|=wW>|tXiJ4xnZupssjkflFM+&xwhrfWgr$Z^PPT2Gfn-xFSm!n?8k<>LRwNZPD;ood3L{(5 z=GNmslkxRK!_+5?$EDq z8vf_&qclh<@9Ne7Z|N}q%?XEW@X5oxRrEVv68{Y$F+n%bh@=I-UlV&~_S_47!kJQ;dV~e7d4>Hpk^A zj^DJjv;7R~WiX?S@fs?aQSa>fi043(EHQ%_tp(AXqly*>!uB*>nXtY5*1Rc`i6uofsw5#>@jS5t~LvF!&Zx5=F=Vd z)go)DU}=1=@71pD>Cuy7NO)EoP0H;~NzZ*Pw~ip_DM%9@+#;2S;UO2&rcyixDbi(7 zp(Qu4)10mq7ecz3$M)`SY*@NJeTbILzcA_Tyak z(Q<@S8C$SDoonDh1U$eMhB@YwA&63H@;jev+D6&;9s5_e%c%O#nE^MQ#+^x=&;9Bn zOT*whQCE3YZFqL(R=oiZ(Wwi~OI-jJO66;0L>AWku;a`+7Qt+aRyo}Q(fTR45SM%Q zd`DR7D~L*OD4KI-B+=3UPHLieG?52jj-vM&+V}@=mBJJItCp{fjJ4NV9rcu>{qR}F zjK>1)PovSHiEwxcx`o+}`kdL$E4b*RF=z}fI?-t~WgbVvv%)iXw1kj^!E#Labi}w- zXpwDk(4$-%B@xS$qc92TX~mI#MBhdwFXfV~8t~4%avxM2Q3(zlqFY~5m8S{%CU%bS zxq}1Y=bEK9x$41*`Q9KDY(1u(qyMCWU>Kt}Xp&PD?Wx~}F`ung9!Y-PO@pVdDss=) zt=}3gl@1(4idEfv<+>h%k&jhU4)hThVavoO)g!_zQ1gg4X6t)KHRc)DYsjLv`kXAI zsshtP&l&yP;WQ6DLFS~*_Oae}j$J&PkEhQL85c9yIkH0(@~1u2HvD2)4K>VNs&vXZ zK~1_5nkTqJ&0C4aa)j(t3R-@tR%FgB5`@cSVe)1*o#=8TD8CB0`q zl1+aI3^-RI8pIU3Hdr>|i6*qHmOr;wb6QWo#|(mam|#0HQDj+t7j!6r41qCN{W48+ zhTINJLU}&hw^2<8^@>(F`Q}UX>Q^pqZ#qHGl2M0>nE-r<9^gYfXq+#kM)8l+cxh!_XcQd{}-Le0<{&qec+fRE6nDor#k_O9a$o^Q7dsa+bQ48(;HYx5NolyoVEzwej zUghqc)^$xzX$JO-sX8kWcy1FtYoKR;m3rE}e{!tmgVv>XEuAM|)DH*2qYedXM(xo} zrN@Ew8-Pf>dGJy`a2+We!((isHT#3!D9E6D&zJ~M+Qp}3B1KLGdp)VBJc)lQO2oI$ zRmNLEx(4MLy7jRn#08@fC%!j?3et?86jB%umW|kJ zGt#0(CYf7G_L9l&qO#JLk9=e2$o;v_qDd=@d0E7y97?Nzv=9oYHcWfFJR}sw8-|b3 z)j?n6jSEcIxT61otWyLgLEh+b!Y^)n`Fy*wE8p}X& z^I3(DQEnn*q1`6jsZ7p1DQCYSVplln^&xM>U4ZkOS}p65^rGkH$UTQb3sVqlpWAks z)Ubv*hnq6OJ)ybAD-R9Gum7B8`8Oo-y!gB>IKQ!^`q)J^C(%cKAKF6=?Grd7Qiu^( zT#@8y_9_rPFA;HL0ZJml9`yw^kkH=B;-0J{Wn7HT_i)Sl4AhYP&M%yVld)B! 
zUiQbls?hn)EaM0Xf|21ve-B$u2)GfRMA3(|O#(Iz2K7FVzpPzs;zQ4$&!@lLd}*Xt z9CbtH0xo0Px1cc473{DW8%X+fj@2E-ef3@!kcdDSBh(yvtDITcTPeAbgi$OL`AT>| zfn~(vcFy%_D?CQLn3*d^IzlGYEZvy!!Xn8*xNoyBcQD=O@- z5)I<>M46RvS^jvo&WW_h^cxJ50!OIW&g8&1n??Y@5iT;pG(D0xl3F)b9*g#RMjS6? zTw>hTCFH+1&xn2A$O_io{W?v~Kd`O#%fL&&$O>EtpJ%XbtURZLsbrulY0CCaF_b>l z_LO(Wze6UDXIf3u%mX@xn$q<4M?4;XVpXW|TK3sx*B4JFo6FJN^qi_$SgKLR18_M) z?X6Au0vTCFgxv_df5??8VB3)9zLR9vAAD~2oBeX?f-Fhq8F|I!Z6koiYUXg;6t5|x zmKevKt{}hIVm;(5YgDHa^UG6h+q|i1N_3w)qFgZgYTRx}@^5YLZ}~rILnJfFuZHA~ zA-K3jIB-jyN8Z(spKJUwYh4G@j_mtPG+j$dy5A(fq}^~eQQRo^&9khfWjABl1^(%g zB1l}G-bg4zZ|Sg)Hg(;T0a8KmYr4L}&tM}>xYtzQ&C3|N6~_XR>q1MReq0SZh<);P zBYcI)Ee%*pG@fxmACGWvJ$F@-+_Fs426u?FKR`cQ{NN{V3ISWVAfimZ#8K*7EM(UJ z%^Lcl{L<4uUDU?yjz~yB1x$){zn_fl)n9Z-j`<(jxoBbWD7yUBK zP8uSJ4%3Gf#*v7!)dYfB_alSUx2bs-!R#sA8NAr30OrwD5~SNYD!i?(UYdJnApF)T zuMnrt+qA8v6JS{@RdS~6R*hROFA!}#|IM)z&=ajo;R_iZjI zF}qGR0Y1-Q!ccCm8;1DUU5-pLq+z6y#_L!cWbL=FOI1@~@j|yF9{k20Z#V0L@{1ck%-1j3L%gr9@b0g6P9+ zJhb^C%oCUiWR{+eoefonMt#<_IzEa=AAmP2dr932O>~(a`Jst-UFVv#EEez}q}MFk zL%=C$0hGZKeYcuz@y3mJ@Mp!Z=*L@+?Z(2AUCUAswQ#ard6{Tq;47cDaz{d=VHAMPGGq{9~`i!7W8)gpDm+rQZHR-}$zfK&8gA>Z`Vdu}{V0uX}Hj7rRot zoM0bc?R?Tr4H-dkkNr0s0R=Q3B^tn~tXxKK)&kE;a)A0L+@5_0T56P!LNT z()rTYYc>^ypXNIg;=)p{k-hoJ!Z7p<_%oceUiEAJHF0g!vV{a+jajtt!t zTRD>9X1Z3e7b@I~5J|jr=>RF&Mb3j|1+5ap-MxE{SFp2uxfeD!qc&gX0M)`!ICqKg z4tfuNDl?<%IR2FH;ZT#QYd)$sl_pR6-ZD(fy%K1E*alFYha7M&6=^#;M||F+%mw^F zZz+!H4V^-^z&5#E6{&J3Yj+RZjcKQzB^94j%1d%kSPQ|8 zqMD>L4|6*s@@rL1CNsKZk0ub}isMcp)mbub%;3eu!a#~>qGN!{#8WUq%20XxXEhK6 zh+9W#=mhhlpjK{&b|cM38ihQ?WNE&;A5`{j2Yb)ZUJfgBdqHA&JUnacnPbw_=v>PV zCt6ERPka1RdmTZThOG9rqWZN*GnQ9j)mZrVGlvPglOm0$o)Q*=AeOPwl5BCKl#2_6 z?Wa-9_bPPgfY@d7vITKHdMxFPSp96Z)_QvQ{crOA)>EDGN%=wrYNYE_=@|vEPU~5R z!_`@T{Jx*1>S)jA8Y@$l2Rap-AV46$%0(+P9{Lcpa(8?eRaYVei&-gRU{%AzVTzQa zNiYb_u*e>gdCv(H>}$f+5HSW4e`G{XfoQXS;pyh`o)Ie3-lyf&fa&|o@*9;0UUt%} zdR~VZm%YJmOR%kuPkRRsT9(pB#g>`xZ|gQG>I4w*3>UsR#E}uL@O%NDVdg+|l-}Kd z9gx#V7(g*9p6ARoMRjP`hFO~t$st5^biaxaj;}*hq0j#&?#!}`L*gVhBEOaDP19}D 
zMms{Z4kDOY!%hN3IO$rJG(>o(FN$4-If*?YY~8kPP|zN6K$x&3^5hYXFzr`}gG+Of zQrkwbg=~vYc5E7@BoyM9my87daf$LsGkuG>!ja2k6y+OZP~t&;agnsIn)R*uI@h>F zk#Li37vqOUQ^YaR6h6P7Jp8A!R@sYHrGtt#f-AU^N1(@z`Je3FodsY^=-<`(ZUc{P zh@Gw62iPA(GO1fM$M26}qUNAqw!|Gu|J2$luyjPNMi)bV%(t6l0ysTbW2trWEFtG~ zxTC31GfPKLQF+KG5=d;5eU8o+|65!dFu;A!Dhm z7F{;L0+gt*-0V@?S16Smz8pnVx-w&Br^O=g_E+sO)^(FtN1n=XErB(Z2K4qw4+`SF zVL7|VC9&?#xzg07>OEi)o%mgrP%|0RYR^}cYVxai?wIPGs8r}RggD$3q8(8$P=Mc^ z2MRFWOtBQlHITc2)1w3x%mE=?{3dJ=v7Tw|h7S9v$4m;~=<~Oh`R^BW9TY?nk9L0Y zp3R(ZT@h&MK0i4EHiz6lW%RD+`!=g?8+pU=0heus^alhlWsb2(I6NYhVsIv5RQj&F zz)W^!I06ePeNy&UL8kdE^UVfr_dgQumz4e^sCc}&_gQz-taVr54FuGMZUI{BQ%+f^ zh-)1km(%^Gn~#awRK7u(vH6yTyifNskRKQSv^j5k($pc845P5oyj&wcuk z%tMC9Q)8_&^bfy>2(VFew!znW!wpg}pH=v{ApX@XFpD*xcdcRc{sY(OxG%SF@>SRx zNh2yoz5`L&>!~`%&`IJIRxY5|4I}^Mv>A9ikt=bF_qq+`W;T3(VlN>Iwe2x}$}2_{ z^Vt0b62hZ4e6tz)6AyZS?Gh~AjY#We&PG(r4Riid&c~theR8SmZ!3}>9eSwm%?Iws z05Q|$(>y)}w8}~&tnhNi^|y>51`yDT$ltBE^r(w`{wAM~^>qIeS97_>=thfyNAI)f zkz0eBn-#%#yysMxcj;`~CzFq4>C*NB{*o)V?zf(jX zg^4yhB`06S&-gd8Xc}lt(X=sklH|ng;|kBwt5c=1 zpzT<8aEXl5b<_bhl?FW6U#q$XyU9!V&`d5gd{GQ;Y4!`Dw8(jyqXi#~7x7Oam7Vso z$Y4CP?XX)*Nx6Zj1JzKrh66M{V9FMN1RqYG=6$3v5OxVr#npz3%BFt)conh_*hLGwb`zh_U0lw(a;Ih37GJ3ArIQ zxi?@}o}9+RCn`F`8q!DEzO|TN7`z_TKf*Oo`)-#cvtge3_sj#tZd!v`xfX5TK^-c* zpeST0yY>06^KLvHJ*0YvLK+%qJ-5mpdLsw&TfvmPXPjo{t6q`t<3@vJdZ`FD0if5w zVDnC_!NfQ`nP5hqJK_jWUmSgG?uo&+eEepuFr7cLR+HI+pd$5iOlLU~YCOm)VDrD` zRpwb-s%FkMUM_f?S2untcpfIAsKF@)BUR33B57jUqJcy`9oVhjA&JNpZbE&Pta3S|c9MwtPe=rRImY6!Ff}BCQE`u^hSM_0*-* znVY2}b7m_w%vto~Mwc*$7Ab=}NS%`uf~X(k9=PbZIf%@BRxNUD4v3jt)A$0=s124pA&HW5@-siV{8Ki2~eAQv5l4 z?OUO&ae#^%6a*tQ0pY_{t)mc?{oe3JLF8Lc3#^hn98y^j7W_@)Zl_N2)o~?y zNhJ6D@c6X#Q~%SnwExy=Qmmy!$*k zf%v|0nIA*V|N9@h`2+JU0Mb+Z+e4RjEqn9Be*ZI;7yS2G?t_Dh)ZY$^hyUo5ZSl8f zGaVz@fdJo&e|r+shS`7lWqLM${QxNDTO6y<|L~bo?-m<*CWl0x9bZ&;+2oIm=2.0.2 \ No newline at end of file diff --git a/run_compile.py b/run_compile.py deleted file mode 100644 index 495272d..0000000 --- a/run_compile.py +++ /dev/null @@ -1,74 +0,0 
@@ -import tensorlayer as T -from dragon.vm.tensorlayer.layers import Dense -from dragon.vm.tensorlayer.models import Model -import dragon.vm.tensorlayer as tl - -import dragon as dg -import argparse -import numpy as np - -X_train, y_train, X_val, y_val, X_test, y_test = T.files.load_mnist_dataset(shape=(-1, 784)) - - -class MLP(Model): - - def __init__(self): - super(MLP, self).__init__() - self.dense1 = Dense(n_units=800, act=tl.act.relu, in_channels=784) - self.dense2 = Dense(n_units=800, act=tl.act.relu, in_channels=800) - self.dense3 = Dense(n_units=10, act=tl.act.relu, in_channels=800) - - def forward(self, x): - z = self.dense1(x) - z = self.dense2(z) - out = self.dense3(z) - return out - - -class Classifier(object): - """The base classifier class.""" - - # TensorSpec for graph execution - image_spec = dg.Tensor([None, 3, 32, 32], 'float32') - label_spec = dg.Tensor([None], 'int64') - - def __init__(self, optimizer): - super(Classifier, self).__init__() - self.net = MLP() - self.optimizer = optimizer - self.params = self.net.trainable_weights - - def step(self, image, label): - with dg.GradientTape() as tape: - logit = self.net(image) - # logit = dg.cast(logit, 'float64') - logit = dg.cast(dg.math.argmax(logit, -1), 'int32') - # label = dg.cast(label, 'float32') - # print("logit :\n", logit, label) - # loss = dg.losses.smooth_l1_loss([logit, label]) - # loss = tl.losses.sparse_softmax_crossentropy(logit, label) - loss = dg.math.sum( - (logit - label) * (logit - label) - ) # dg.losses.sparse_softmax_cross_entropy([logit, label]) - accuracy = dg.math.mean(dg.math.equal([logit, label]).astype('float32')) - grads = tape.gradient(loss, self.params) - self.optimizer.apply_gradients(zip(self.params, grads)) - return loss, accuracy, self.optimizer - - -if __name__ == '__main__': - dg.autograph.set_execution('EAGER_MODE') - # Define the model - model = Classifier(dg.optimizers.SGD(base_lr=0.001, momentum=0.9, weight_decay=1e-4)) - # Main loop - batch_size = 200 - for 
i in range(50): - for X_batch, y_batch in T.iterate.minibatches(X_train, y_train, batch_size, shuffle=True): - image = dg.EagerTensor(X_batch, copy=False) - label = dg.EagerTensor(y_batch, copy=False, dtype='float32') - loss, accuracy, _ = model.step(image, label) - if i % 20 == 0: - dg.logging.info( - 'Iteration %d, lr = %s, loss = %.5f, accuracy = %.3f' % - (i, str(model.optimizer.base_lr), loss, accuracy) - ) diff --git a/setup.cfg b/setup.cfg index 67f86fe..1fe0495 100644 --- a/setup.cfg +++ b/setup.cfg @@ -23,7 +23,7 @@ based_on_style=google # The number of columns to use for indentation. indent_width = 4 -# The column limit. +# The column limit. (larger than usual) column_limit=120 # Place each dictionary entry onto its own line. @@ -76,4 +76,4 @@ no_spaces_around_selected_binary_operators = True allow_multiline_lambdas = True SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = 10 -SPLIT_PENALTY_AFTER_OPENING_BRACKET = 500 \ No newline at end of file +SPLIT_PENALTY_AFTER_OPENING_BRACKET = 500 diff --git a/setup.py b/setup.py index 76ac682..cf4c14d 100644 --- a/setup.py +++ b/setup.py @@ -5,39 +5,24 @@ import sys os.environ['TENSORLAYER_PACKAGE_BUILDING'] = 'True' - try: from setuptools import find_packages, setup, Extension from setuptools.command.build_ext import build_ext except ImportError: - from distutils.core import ( - setup, - find_packages - ) - + from distutils.core import (setup, find_packages) from tensorlayer import ( - __contact_emails__, - __contact_names__, - __description__, - __download_url__, - __homepage__, - __keywords__, - __license__, - __package_name__, - __repository_url__, - __version__ + __contact_emails__, __contact_names__, __description__, __download_url__, __homepage__, __keywords__, __license__, + __package_name__, __repository_url__, __version__ ) - # =================== Reading Readme file as TXT files =================== if os.path.exists('README.rst'): # codec is used for consistent encoding long_description = codecs.open( - 
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), - 'r', 'utf-8' + os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), 'r', 'utf-8' ).read() else: @@ -53,8 +38,8 @@ def req_file(filename, folder="requirements"): # Example: `\n` at the end of each line return [x.strip() for x in content] -# ======================= Defining the requirements var ======================= +# ======================= Defining the requirements var ======================= install_requires = req_file("requirements.txt") @@ -83,11 +68,9 @@ extras_require['all_dev'] = sum([extras_require.get(key) for key in ['all', 'db' extras_require['all_cpu_dev'] = sum([extras_require.get(key) for key in ['all_dev', 'tf_cpu']], list()) extras_require['all_gpu_dev'] = sum([extras_require.get(key) for key in ['all_dev', 'tf_gpu']], list()) - cmdclass = dict() ext_modules = [] - # Readthedocs requires TF 1.5.0 to build properly if 'READTHEDOCS' in os.environ: ext_modules = [ @@ -95,16 +78,14 @@ if 'READTHEDOCS' in os.environ: ] class custom_build_ext(build_ext): + def build_extensions(self): - os.system('./scripts/install-requirements-for-rtd.sh %s' % - os.path.dirname(sys.executable)) + os.system('./scripts/install-requirements-for-rtd.sh %s' % os.path.dirname(sys.executable)) cmdclass = {'build_ext': custom_build_ext} - # ======================= Define the package setup ======================= - setup( name=__package_name__, @@ -112,7 +93,6 @@ setup( # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html version=__version__, - description=__description__, long_description=long_description, @@ -130,7 +110,6 @@ setup( # The licence under which the project is released license=__license__, - classifiers=[ # How mature is this project? 
Common values are # 1 - Planning @@ -170,7 +149,6 @@ setup( 'Natural Language :: English', 'Operating System :: OS Independent', ], - keywords=__keywords__, packages=find_packages(), @@ -179,7 +157,6 @@ setup( # requirements files see: # https://packaging.python.org/en/latest/requirements.html install_requires=install_requires, - cmdclass=cmdclass, # List additional groups of dependencies here (e.g. development @@ -187,7 +164,6 @@ setup( # $ pip install -e .[test] extras_require=extras_require, ext_modules=ext_modules, - scripts=[ 'tl', ], diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py index 442dce1..be46822 100644 --- a/tensorlayer/__init__.py +++ b/tensorlayer/__init__.py @@ -51,6 +51,7 @@ if 'TENSORLAYER_PACKAGE_BUILDING' not in os.environ: from tensorlayer import utils from tensorlayer import dataflow from tensorlayer import metric + from tensorlayer import vision from tensorlayer.lazy_imports import LazyImport diff --git a/tensorlayer/backend/__init__.py b/tensorlayer/backend/__init__.py index 01e5c83..4533f5b 100644 --- a/tensorlayer/backend/__init__.py +++ b/tensorlayer/backend/__init__.py @@ -3,4 +3,4 @@ # load ops from .ops import * -from tensorlayer.backend import ops \ No newline at end of file +from tensorlayer.backend import ops diff --git a/tensorlayer/backend/ops/__init__.py b/tensorlayer/backend/ops/__init__.py index 96277ae..ad780e2 100644 --- a/tensorlayer/backend/ops/__init__.py +++ b/tensorlayer/backend/ops/__init__.py @@ -45,8 +45,12 @@ from .load_backend import Conv1D from .load_backend import Conv2D from .load_backend import Conv3D from .load_backend import BiasAdd +from .load_backend import MaxPool1d from .load_backend import MaxPool +from .load_backend import MaxPool3d +from .load_backend import AvgPool1d from .load_backend import AvgPool +from .load_backend import AvgPool3d from .load_backend import Dropout from .load_backend import BatchNorm from .load_backend import DepthwiseConv2d @@ -58,6 +62,8 @@ from .load_backend 
import AdaptiveMeanPool3D from .load_backend import AdaptiveMaxPool1D from .load_backend import AdaptiveMaxPool2D from .load_backend import AdaptiveMaxPool3D +from .load_backend import Floor +from .load_backend import Ceil # load ops from .load_backend import Variable @@ -68,6 +74,7 @@ from .load_backend import minimum from .load_backend import reshape from .load_backend import concat from .load_backend import convert_to_tensor +from .load_backend import convert_to_numpy from .load_backend import sqrt from .load_backend import reduce_mean from .load_backend import reduce_min @@ -112,10 +119,10 @@ from .load_backend import OneHot from .load_backend import L2Normalize from .load_backend import EmbeddingLookup from .load_backend import NCELoss -from .load_backend import Not_equal +from .load_backend import NotEqual from .load_backend import Cast from .load_backend import ExpandDims -from .load_backend import Count_nonzero +from .load_backend import CountNonzero from .load_backend import FlattenReshape from .load_backend import Transpose from .load_backend import MatMul diff --git a/tensorlayer/backend/ops/dragon_backend.py b/tensorlayer/backend/ops/dragon_backend.py deleted file mode 100644 index e62f27e..0000000 --- a/tensorlayer/backend/ops/dragon_backend.py +++ /dev/null @@ -1,1049 +0,0 @@ -#! 
/usr/bin/python -# -*- coding: utf-8 -*- - -from __future__ import absolute_import, division, print_function - -import numpy as np -import dragon as D - -from dragon.core.eager import context -from dragon.core.ops import init_ops -from dragon.core.ops import vision_ops - -_dtypeDict = ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] -# TODO NotImplemented -DType = None -float16 = 'float16' -float32 = 'float32' -float64 = 'float64' -int8 = 'int8' -int16 = 'int16' -int32 = 'int32' -int64 = 'int64' -uint8 = 'uint8' -uint16 = 'uint16' -uint32 = 'uint32' -uint64 = 'uint64' - -# isinstance input output -# TODO NotImplemented -# TensorLike = None - - -def _getter(init_fn, **kwargs): - """Return an named eager tensor.""" - with context.eager_mode(): - value = init_fn(**kwargs) - value._name = kwargs.get('name', value.id) - return value - - -def set_context(**kwargs): - raise Exception("Using Dragon backend,You don't need to set context") - - -def get_tensor_shape(x): - return x.shape - - -# initializers -def zeros(shape, dtype='float32'): - """ - Creates a tensor with all elements set to zero. - - Parameters - ---------- - shape : A list of integers - a tuple of integers, or a 1-D Tensor of type int32. - dtype : tensor - The DType of an element in the resulting Tensor - - Returns - ------- - A Tensor with all elements set to zero. - - """ - return _getter( - init_ops.fill, - value=0, - shape=shape, - dtype=dtype, - ) - - -def ones(shape, dtype='float32'): - """ - Creates a tensor with all elements set to ones. - - Parameters - ---------- - shape : A list of integers - a tuple of integers, or a 1-D Tensor of type int32. - dtype : tensor - The DType of an element in the resulting Tensor - - Returns - ------- - A Tensor with all elements set to zero. 
- - """ - return _getter( - init_ops.fill, - value=1, - shape=shape, - dtype=dtype, - ) - - -def constant(value, shape, dtype='float32'): - """ - Creates a constant tensor from a tensor-like object. - - Parameters - ---------- - value : list - A constant value (or list) of output type dtype. - dtype : tensor - The type of the elements of the resulting tensor. - shape : tuple - Optional dimensions of resulting tensor. - - Returns - ------- - A Constant Tensor. - - """ - # shape = shape[::-1] - return _getter( - init_ops.fill, - value=value, - shape=shape, - dtype=dtype, - ) - - -def random_uniform(shape, minval=0, maxval=None, dtype='float32', seed=None): - """ - Outputs random values from a uniform distribution. - - Parameters - ---------- - shape : tuple - A 1-D integer Tensor or Python array. The shape of the output tensor. - minval : int - The lower bound on the range of random values to generate (inclusive). Defaults to 0. - maxval : int - The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point. - dtype : tensor - The type of the output: float16, float32, float64, int32, or int64. - seed : int - Used in combination with dragon.random.set_seed to create a reproducible sequence of tensors across multiple calls. - Returns - ------- - A tensor of the specified shape filled with random uniform values. - - """ - return _getter(init_ops.random_uniform, low=minval, high=maxval, shape=shape, dtype=dtype) - - -def random_normal(shape, mean=0.0, stddev=1.0, dtype='float32', seed=None): - """ - Outputs random values from a normal distribution. - - Parameters - ---------- - shape : tuple - A 1-D integer Tensor or Python array. The shape of the output tensor. - mean : float - The mean of the normal distribution - stddev : float - The standard deviation of the normal distribution. - dtype : tensor - The type of the output. 
- seed : A Python integer - Used to create a random seed for the distribution - - Returns - ------- - A tensor of the specified shape filled with random normal values. - - """ - return _getter( - init_ops.random_normal, - mean=mean, - std=stddev, - shape=shape, - dtype=dtype, - ) - - -def truncated_normal(shape, mean=0.0, stddev=1.0, dtype='float32', seed=None): - """ - Outputs random values from a truncated normal distribution. - - Parameters - ---------- - shape : tuple - A 1-D integer Tensor or Python array. The shape of the output tensor. - mean : float - The mean of the normal distribution - stddev : float - The standard deviation of the normal distribution. - dtype : tensor - The type of the output. - seed : A Python integer - Used to create a random seed for the distribution - - Returns - ------- - A tensor of the specified shape filled with random truncated normal values. - - """ - return _getter( - init_ops.truncated_normal, - mean=mean, - std=stddev, - shape=shape, - dtype=dtype, - ) - - -def he_normal(shape, dtype, seed=None): - """ - He normal initializer. - - Parameters - ---------- - seed : A Python integer. - Used to seed the random generator. - shape : tuple - A 1-D integer Tensor or Python array. The shape of the output tensor. - dtype : tensor - The type of the output. - - Returns - ------- - A tensor of the specified shape filled with he normal values. - """ - # shape = shape[::-1] - raise NotImplementedError("He_Normal is not implemented") - - -def Variable(initial_value, name, trainable=None): - """ - Creates a new variable with value initial_value. - - Parameters - ---------- - initial_value : tensor - A Tensor, or Python object convertible to a Tensor - name : str - Optional name for the variable. Defaults to 'Variable' and gets uniquified automatically. 
- Returns - ------- - Variable - """ - return D.Tensor(name=name, shape=initial_value) - - -class MatMul(object): - - def __init__(self): - pass - - def __call__(self, a, b): - inputs = [a, b] - return D.math.matmul(inputs) - - -def matmul(a, b): - """ - Multiplies matrix a by matrix b, producing a * b. - - Parameters - ---------- - a : tensor - type float16, float32, float64, int32, complex64, complex128 and rank > 1. - b : tensor - with same type and rank as a. - - Returns - ------- - A Tensor of the same type as a and b - """ - inputs = [a, b] - return D.math.matmul(inputs) - - -def add(value, bias): - """ - Returns x + y element-wise. - - Parameters - ---------- - value : tensor. - Must be one of the following types: bfloat16, half, float32, float64, - uint8, int8, int16, int32, int64, complex64, complex128, string. - bias : tensor - Must have the same type as a - name : str - A name for the operation - - Returns - ------- - A Tensor. Has the same type as a. - """ - - inputs = [value, bias] - return D.math.add(inputs) - - -def dtypes(dt): - """ - Data dtypes. - - Parameters - ---------- - dt : string - It could be 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', - 'int32', 'int64', 'float16', 'float32', 'float64', 'DType'. - - Returns - ------- - Data dtypes - """ - if dt not in _dtypeDict: - raise Exception("Unsupported dtype: {}".format(dt)) - return dt - - -def minimum(x, y): - """ - Returns the min of x and y (i.e. x < y ? x : y) element-wise. - - Parameters - ---------- - x : tensor. - Must be one of the following types: bfloat16, half, float32, float64, int32, int64. - y : A Tensor. - Must have the same type as x. - name : str - A name for the operation (optional). - - Returns - ------- - A Tensor. 
Has the same type as x - """ - inputs = [x, y] - return D.math.minimum(inputs) - - -class FlattenReshape(object): - - def __init__(self): - pass - - def __call__(self, inputs): - dim = 1 - for d in get_tensor_shape(inputs)[1:]: - dim *= d - return D.reshape(inputs, [-1, dim]) - - -class Reshape(object): - - def __init__(self, shape): - self.shape = shape - - def __call__(self, tensor): - return D.reshape(tensor, shape=self.shape) - - -def reshape(tensor, shape): - """ - Reshapes a tensor. - - Parameters - ---------- - tensor : tensor - A Tensor. - shape : tensor - Defines the shape of the output tensor. - Returns - ------- - A Tensor. Has the same type as tensor - """ - return D.reshape(tensor, shape=shape) - - -class Concat(object): - - def __init__(self, axis): - super(Concat, self).__init__() - self.axis = axis - - def __call__(self, values): - return D.concat(values=values, axis=self.axis) - - -def concat(values, axis): - """ - Concatenates tensors along one dimension. - - Parameters - ---------- - values : list - A list of Tensor objects or a single Tensor - axis : int - 0-D int32 Tensor. Dimension along which to concatenate - Returns - ------- - A Tensor resulting from concatenation of the input tensors. - """ - return D.concat(values, axis=axis) - - -def convert_to_tensor(value, dtype=None): - """ - Converts the given value to a Tensor. - - Parameters - ---------- - value : object - An object whose type has a registered Tensor conversion function. - dtype : optional - Optional element type for the returned tensor. If missing, the type is inferred from the type of value. - - Returns - ------- - A Tensor based on value. - """ - return D.Tensor.convert_to(value, dtype) - - -def sqrt(x): - """ - Computes square root of x element-wise. - - Parameters - ---------- - x : tensor - Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128. - - Returns - ------- - A Tensor. Has the same type as x. 
- """ - return D.math.sqrt(x) - - -class ReduceSum(object): - - def __init__(self, axis): - pass - - def construct(self, input): - pass - - -class ReduceMean(object): - - def __init__(self, axis): - if axis == [1, 2]: - self.data_format = 'NHWC' - elif axis == [2, 3]: - self.data_format = 'NCHW' - else: - raise ("`data_format` should have one of the following values: [`channels_last`, `channels_first`]") - - def __call__(self, inputs): - return vision_ops.pool2d( - inputs, - kernel_shape=1, - strides=1, - pads=0, - mode='AVG', - global_pooling=True, - data_format=self.data_format, - ) - - -def reduce_mean(input_tensor, axis=None): - """ - Computes the mean of elements across dimensions of a tensor. - - Parameters - ---------- - input_tensor : tensor - The tensor to reduce. Should have numeric type. - axis : int - The dimensions to reduce. If None (the default), reduces all dimensions. - Must be in the range [-rank(input_tensor), rank(input_tensor)). - name : str - A name for the operation (optional). - - Returns - ------- - The reduced tensor. - """ - - return D.mean(input_tensor, axes=axis) - - -class ReduceMax(object): - - def __init__(self, axis): - if axis == [1, 2]: - self.data_format = 'NHWC' - elif axis == [2, 3]: - self.data_format = 'NCHW' - else: - raise ("`data_format` should have one of the following values: [`channels_last`, `channels_first`]") - - def __call__(self, inputs): - return vision_ops.pool2d( - inputs, kernel_shape=1, strides=1, pads=0, mode='MAX', global_pooling=True, data_format=self.data_format - ) - - -def reduce_max(input_tensor, axis=None): - """ - Computes the maximum of elements across dimensions of a tensor. - - Parameters - ---------- - input_tensor : tensor - The tensor to reduce. Should have real numeric type. - axis : int - The dimensions to reduce. If None (the default), reduces all dimensions. - Must be in the range [-rank(input_tensor), rank(input_tensor)). - name : str - A name for the operation (optional). 
- - Returns - ------- - The reduced tensor. - """ - - return D.max(input_tensor, axis) - - -def reduce_min(input_tensor, axis=None): - """ - Computes the minimum of elements across dimensions of a tensor. - - Parameters - ---------- - input_tensor : tensor - The tensor to reduce. Should have real numeric type. - axis : int - The dimensions to reduce. If None (the default), reduces all dimensions. - Must be in the range [-rank(input_tensor), rank(input_tensor)). - name : str - A name for the operation (optional). - - Returns - ------- - The reduced tensor. - """ - return D.min(input_tensor, axis) - - -class Pad(object): - - def __init__(self, paddings, mode="REFLECT"): - if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: - raise Exception("Unsupported mode: {}".format(mode)) - if mode == 'SYMMETRIC': - mode = 'EDGE' - self.paddings = paddings - self.mode = mode - - def __call__(self, x): - outputs = D.pad(x, pads=self.paddings, mode=self.mode, value=0) - return outputs - - -def pad(tensor, paddings, mode='CONSTANT', constant_values=0): - """ - Pads a tensor. - - Parameters - ---------- - tensor : tensor - A Tensor. - paddings : tuple - A tuple of type int32. - mode : str - One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive) - constant_values : int - In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor. - - Returns - ------- - A Tensor. Has the same type as tensor. 
- """ - if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: - raise Exception("Unsupported mode: {}".format(mode)) - if mode == 'SYMMETRIC': - mode = 'EDGE' - outputs = D.pad(tensor, pads=paddings, mode=mode, value=constant_values) - return outputs - - -class Unstack(object): - - def __init__(self, axis, num=None): - self.axis = axis - self.num = num - - def __call__(self, values): - raise NotImplementedError - - -class Stack(object): - - def __init__(self, axis): - self.axis = axis - - def __call__(self, values): - return D.stack(values, axis=self.axis) - - -def stack(values, axis=0): - """ - Stacks a list of rank-R tensors into one rank-(R+1) tensor. - - Parameters - ---------- - values : list - A list of Tensor objects with the same shape and type. - axis : int - An int. The axis to stack along. Defaults to the first dimension. - Negative values wrap around, so the valid range is [-(R+1), R+1). - - Returns - ------- - A stacked Tensor with the same type as values. - """ - return D.stack(values, axis=axis) - - -class Meshgrid(object): - - def __init__(self, indexing='xy'): - super(Meshgrid, self).__init__() - self.index = indexing - - def __call__(self, inputs): - pass - - -def meshgrid(x, y): - """ - Broadcasts parameters for evaluation on an N-D grid. - - Parameters - ---------- - x : tensor - Tensors with rank 1. - y : tensor - Tensors with rank 1. - - Returns - ------- - A list of N Tensors with rank N. - """ - - pass - - -def range(start, limit=None, delta=1, dtype=None): - """ - Creates a sequence of numbers. - - Parameters - ---------- - start : tensor - A 0-D Tensor (scalar). Acts as first entry in the range if limit is not None; - otherwise, acts as range limit and first entry defaults to 0. - limit : tensor - A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If None, - defaults to the value of start while the first entry of the range defaults to 0. - delta : tensor - A 0-D Tensor (scalar). Number that increments start. Defaults to 1. 
- dtype : type - The type of the elements of the resulting tensor. - - Returns - ------- - An 1-D Tensor of type dtype. - """ - if dtype is None: - dtype = 'int32' - if limit is None: - outputs = D.arange(start=0, stop=start, step=delta, dtype=dtype) - else: - outputs = D.arange(start, stop=limit, step=delta, dtype=dtype) - return outputs - - -class ExpandDims(object): - - def __init__(self, axis): - pass - - def construct(self, input): - pass - - -def expand_dims(input, axis): - """ - Inserts a dimension of 1 into a tensor's shape. - - Parameters - ---------- - input : tensor - A Tensor. - axis : int - 0-D (scalar). Specifies the dimension index at which to expand the shape of input. - Must be in the range [-rank(input) - 1, rank(input)]. - - Returns - ------- - A Tensor with the same data as input, but its shape has an additional dimension of size 1 added. - """ - - return D.expand_dims(input, axis=axis) - - -class Tile(object): - - def __init__(self): - pass - - def __call__(self, input, multiples): - return D.tile(input, multiples) - - -def tile(input, multiples): - """ - Constructs a tensor by tiling a given tensor. - - Parameters - ---------- - input : tensor - A Tensor. 1-D or higher. - multiples : tensor - Must be one of the following types: int32, int64. 1-D. - Length must be the same as the number of dimensions in input - - Returns - ------- - A Tensor. Has the same type as input. - """ - return D.tile(input, multiples) - - -class Cast(object): - - def __init__(self, dtype): - pass - - def __call__(self, input): - pass - - -def cast(x, dtype): - """ - Casts a tensor to a new type. - - Parameters - ---------- - x : tensor - A Tensor or SparseTensor or IndexedSlices of numeric type. - It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64. - dtype : dtpye - The destination type. 
The list of supported dtypes is the same as x - - Returns - ------- - A Tensor or SparseTensor or IndexedSlices with same shape as x and same type as dtype. - """ - return D.cast(x, dtype=dtype) - - -class Transpose(object): - - def __init__(self, perm, conjugate=False): - self.perm = perm - if conjugate: - raise ("The conjugate Parameters not supported") - - def __call__(self, a): - return D.transpose(a, self.perm) - - -def transpose(a, perm=None, conjugate=False): - """ - Transposes a. - - Parameters - ---------- - a : tensor - A Tensor. - perm : int - A permutation of the dimensions of a. - conjugate : bool - Setting it to True is mathematically equivalent to ms.math.conj(ms.transpose(input)). - - Returns - ------- - A transposed Tensor. - """ - - conjugate = conjugate - return D.transpose(a, perm=perm) - - -def gather_nd(params, indices, batch_dims=0): - """ - Gather slices from params into a Tensor with shape specified by indices. - - Parameters - ---------- - params : tensor - The tensor from which to gather values. - indices : tensor - Must be one of the following types: int32, int64. Index tensor. - batch_dims : int - An integer or a scalar 'Tensor'. The number of batch dimensions. - - Returns - ------- - A Tensor. Has the same type as params. - """ - - pass - - -def clip_by_value(t, clip_value_min, clip_value_max): - """ - Clips tensor values to a specified min and max. - - Parameters - ---------- - t : tensor - A Tensor or IndexedSlices - clip_value_min : tensor - A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The minimum value to clip by - clip_value_max : tensor - A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The minimum value to clip by - - Returns - ------- - A clipped Tensor or IndexedSlices. - """ - - pass - - -def split(value, num_or_size_splits, axis=0, num=None): - """ - Splits a tensor into sub tensors. - - Parameters - ---------- - value : tensor - The Tensor to split. 
- num_or_size_splits : list - Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or - Python list containing the sizes of each output tensor along split_dim. - axis : int - The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0. - num : int - used to specify the number of outputs when it cannot be inferred from the shape of size_splits. - - Returns - ------- - Tensor objects resulting from splitting value. - """ - pass - - -def floor(x): - return D.math.floor(x) - - -def gather(params, indices): - return NotImplementedError - - -def linspace(start, stop, num): - return D.linspace(start, stop, num) - - -def slice(inputs, starts, sizes): - return D.slice(inputs, starts, sizes) - - -def add_n(inputs): - return NotImplementedError - - -class OneHot(object): - - def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype='float32'): - self.depth = depth - self.dtype = dtype - - def __call__(self, indices): - outputs = np.zeros(shape=(indices.shape[0], self.depth)) - for i in np.arange(indices.shape[0]): - outputs[int(i)][int(indices[int(i)].get_value())] = 1 - outputs = D.constant(outputs, dtype=self.dtype) - return outputs - - -class L2Normalize(object): - - def __init__(self, axis=None, epsilon=1e-12): - super(L2Normalize, self).__init__() - pass - - def __call__(self, input, *args, **kwargs): - pass - - -class EmbeddingLookup(object): - - def __init__(self, max_norm=None): - self.max_norm = max_norm - - def __call__(self, params, ids, *args, **kwargs): - pass - - -class NCELoss(object): - - def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False): - super(NCELoss, self).__init__() - - def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes): - pass - - -class Not_equal(object): - - def __init__(self): - pass - - def __call__(self, x, y): - pass - - -class Count_nonzero(object): - - def __init__(self, keepdims=None, 
dtype='int64'): - pass - - def __call__(self, *args, **kwargs): - pass - - -class Resize: - - def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None): - if method not in ['nearest', 'linear', 'bilinear']: - raise ('Current resize does not support this method.') - if method == 'bilinear': - method = 'linear' - self.method = method - self.antialias = antialias - self.scale = scale - if data_format != 'channel_last': - raise Exception("UpSampling2d resize_images only support channel_last") - - def __call__(self, inputs): - output_size = (int(inputs.shape[1] * self.scale[0]), int(inputs.shape[2] * self.scale[1])) - outputs = D.vision.resize(inputs, sizes=output_size, mode=self.method, align_corners=self.antialias) - return outputs - - -def resize(inputs, output_size, method, antialias): - if method not in ['nearest', 'linear', 'bilinear']: - raise ('Current resize does not support this method.') - if method == 'bilinear': - method = 'linear' - return D.vision.resize(inputs, sizes=output_size, mode=method, align_corners=antialias) - - -class ZeroPadding1D(object): - - def __init__(self): - pass - - def __call__(self, padding): - raise NotImplementedError - - -class ZeroPadding2D(object): - - def __init__(self): - pass - - def __call__(self, padding): - raise NotImplementedError - - -class ZeroPadding3D(object): - - def __init__(self): - pass - - def __call__(self, padding): - raise NotImplementedError - - -class Sign(object): - - def __init__(self): - pass - - def __call__(self, x): - return D.math.sign(x) - - -def ceil(x): - raise NotImplementedError - - -def multiply(x, y): - raise NotImplementedError - - -def divide(x, y): - raise NotImplementedError - - -def identity(x): - raise NotImplementedError - - -class BatchToSpace(object): - - def __init__(self, block_size, crops): - super(BatchToSpace, self).__init__() - pass - - def __call__(self, input_x): - raise NotImplementedError - - -class DepthToSpace(object): - - def 
__init__(self, block_size, data_format='NHWC'): - pass - - def __call__(self, input): - raise NotImplementedError diff --git a/tensorlayer/backend/ops/dragon_nn.py b/tensorlayer/backend/ops/dragon_nn.py deleted file mode 100644 index e6b5105..0000000 --- a/tensorlayer/backend/ops/dragon_nn.py +++ /dev/null @@ -1,910 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- -import dragon as D -from dragon.core.ops import vision_ops -from dragon.core.ops import activation_ops - - -def padding_format(padding): - """ - Checks that the padding format correspond format. - - Parameters - ---------- - padding : str - Must be one of the following:"same", "SAME", "VALID", "valid" - - Returns - ------- - str "SAME" or "VALID" - """ - - if padding in ["SAME", "same"]: - padding = "SAME" - elif padding in ["VALID", "valid"]: - padding = "VALID" - elif padding == None: - padding = None - else: - raise Exception("Unsupported padding: " + str(padding)) - return padding - - -def preprocess_1d_format(data_format, padding): - """ - Checks that the 1-D dataformat format correspond format. - - Parameters - ---------- - data_format : str - Must be one of the following:"channels_last","NWC","NCW","channels_first" - padding : str - Must be one of the following:"same","valid","SAME","VALID" - - Returns - ------- - str "NWC" or "NCW" and "SAME" or "VALID" - """ - - if data_format in ["channels_last", "NWC"]: - data_format = "NWC" - elif data_format in ["channels_first", "NCW"]: - data_format = "NCW" - elif data_format == None: - data_format = None - else: - raise Exception("Unsupported data format: " + str(data_format)) - padding = padding_format(padding) - return data_format, padding - - -def preprocess_2d_format(data_format, padding): - """ - Checks that the 2-D dataformat format correspond format. 
- - Parameters - ---------- - data_format : str - Must be one of the following:"channels_last","NHWC","NCHW","channels_first" - padding : str - Must be one of the following:"same","valid","SAME","VALID" - - Returns - ------- - str "NHWC" or "NCHW" and "SAME" or "VALID" - """ - - if data_format in ["channels_last", "NHWC", "nhwc"]: - data_format = "NHWC" - elif data_format in ["channels_first", "NCHW", "nchw"]: - data_format = "NCHW" - elif data_format == None: - data_format = None - else: - raise Exception("Unsupported data format: " + str(data_format)) - padding = padding_format(padding) - return data_format, padding - - -def preprocess_3d_format(data_format, padding): - """ - Checks that the 3-D dataformat format correspond format. - - Parameters - ---------- - data_format : str - Must be one of the following:"channels_last","NDHWC","NCDHW","channels_first" - padding : str - Must be one of the following:"same","valid","SAME","VALID" - - Returns - ------- - str "NDHWC" or "NCDHW" and "SAME" or "VALID" - """ - - if data_format in ['channels_last', 'NDHWC']: - data_format = 'NDHWC' - elif data_format in ['channels_first', 'NCDHW']: - data_format = 'NCDHW' - elif data_format == None: - data_format = None - else: - raise Exception("Unsupported data format: " + str(data_format)) - padding = padding_format(padding) - return data_format, padding - - -def nchw_to_nhwc(x): - """ - Channels first to channels last - - Parameters - ---------- - x : tensor - channels first tensor data - - Returns - ------- - channels last tensor data - """ - - pass - - -def nhwc_to_nchw(x): - """ - Channles last to channels first - - Parameters - ---------- - x : tensor - channels last tensor data - - Returns - ------- - channels first tensor data - """ - - pass - - -class ReLU(object): - - def __init__(self): - pass - - def __call__(self, x): - return D.nn.relu(x) - - -def relu(x): - """ - Computes rectified linear: max(features, 0). 
- - Parameters - ---------- - x : tensor - Must be one of the following types: float32, float64, int32, uint8, int16, - int8, int64, bfloat16, uint16, half, uint32, uint64, qint8. - - Returns - ------- - A Tensor. Has the same type as features. - """ - return D.nn.relu(x) - - -class ReLU6(object): - - def __init__(self): - pass - - def __call__(self, x): - return D.nn.relu6(x) - - -def relu6(x): - """ - Computes Rectified Linear 6: min(max(features, 0), 6). - - Parameters - ---------- - x : tensor - Must be one of the following types: float32, float64, int32, uint8, int16, - int8, int64, bfloat16, uint16, half, uint32, uint64, qint8. - - Returns - ------- - A Tensor with the same type as features. - """ - return D.nn.relu6(x) - - -class LeakyReLU(object): - - def __init__(self, alpha=0.2): - self.alpha = alpha - - def __call__(self, x): - return D.nn.leaky_relu(x, alpha=self.alpha) - - -def leaky_relu(x): - """ - Compute the Leaky ReLU activation function. - - Parameters - ---------- - x : tensor - representing preactivation values. Must be one of the following types: - float16, float32, float64, int32, int64. - - Returns - ------- - The activation value. - """ - - return D.nn.leaky_relu(x) - - -class Softplus(object): - - def __init__(self): - pass - - def __call__(self, x): - raise NotImplementedError - - -def softplus(x): - """ - Computes softplus: log(exp(features) + 1). - - Parameters - ---------- - x : tensor - Must be one of the following types: half, bfloat16, float32, float64. - - Returns - ------- - A Tensor. Has the same type as features. - """ - - raise NotImplementedError - - -class Tanh(object): - - def __init__(self): - pass - - def __call__(self, x): - return activation_ops.tanh(x) - - -def tanh(x): - """ - Computes hyperbolic tangent of x element-wise. - - Parameters - ---------- - x : tensor - Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128. - - Returns - ------- - A Tensor. Has the same type as x. 
- """ - - return activation_ops.tanh(x) - - -class Sigmoid(object): - - def __init__(self): - pass - - def __call__(self, x): - return activation_ops.sigmoid(x) - - -def sigmoid(x): - """ - Computes sigmoid of x element-wise. - - Parameters - ---------- - x : tensor - A Tensor with type float16, float32, float64, complex64, or complex128. - - Returns - ------- - A Tensor with the same type as x. - """ - return activation_ops.sigmoid(x) - - -class Softmax(object): - - def __init__(self): - pass - - def __call__(self, x): - return D.nn.softmax(x) - - -def softmax(logits, axis=None): - """ - Computes softmax activations. - - Parameters - ---------- - logits : tensor - Must be one of the following types: half, float32, float64. - axis : int - The dimension softmax would be performed on. The default is -1 which indicates the last dimension. - - Returns - ------- - A Tensor. Has the same type and shape as logits. - """ - return D.nn.softmax(logits) - - -class Dropout(object): - - def __init__(self, keep, seed=1): - self.keep = 1 - keep - self.seed = seed - - def __call__(self, inputs): - return D.nn.dropout(inputs, prob=self.keep) - - -class BiasAdd(object): - """ - Adds bias to value. - - Parameters - ---------- - x : tensor - A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128. - bias : tensor - Must be the same type as value unless value is a quantized type, - in which case a different quantized type may be used. - Returns - ------- - A Tensor with the same type as value. - """ - - def __init__(self, data_format='NHWC'): - self.data_format = data_format - - def __call__(self, x, bias): - inputs = [x, bias] - return vision_ops.bias_add(inputs, data_format=self.data_format) - - -def bias_add(x, bias): - """ - Adds bias to value. - - Parameters - ---------- - x : tensor - A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128. 
- bias : tensor - Must be the same type as value unless value is a quantized type, - in which case a different quantized type may be used. - data_format : A string. - 'N...C' and 'NC...' are supported. - name : str - A name for the operation (optional). - Returns - ------- - A Tensor with the same type as value. - """ - inputs = [x, bias] - return vision_ops.bias_add(inputs, data_format='NHWC') - - -class Conv1D(object): - pass - # raise NotImplementedError - - -def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, name=None): - """ - Computes a 1-D convolution given 3-D input and filter tensors. - - Parameters - ---------- - input : tensor - A 3D Tensor. Must be of type float16, float32, or float64 - filters : tensor - A 3D Tensor. Must have the same type as input. - stride : int of list - An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step. - padding : string - 'SAME' or 'VALID' - data_format : string - An optional string from "NWC", "NCW". Defaults to "NWC", the data is stored in the order of - [batch, in_width, in_channels]. The "NCW" format stores data as [batch, in_channels, in_width]. - dilations : int or list - An int or list of ints that has length 1 or 3 which defaults to 1. - The dilation factor for each dimension of input. If set to k > 1, - there will be k-1 skipped cells between each filter element on that dimension. - Dilations in the batch and depth dimensions must be 1. - name : string - A name for the operation (optional). - Returns - ------- - A Tensor. Has the same type as input. 
- """ - - pass - - -class Conv2D(object): - - def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None): - self.data_format, self.padding = preprocess_2d_format(data_format, padding) - self.ksize = k_size[0] - if self.data_format is 'NHWC': - self.dg_stride = strides[1] - self.dg_dilation = dilations[1] - elif self.data_format is 'NCHW': - self.dg_stride = strides[2] - self.dg_dilation = dilations[2] - - def __call__(self, inputs, filters): - outputs = vision_ops.conv2d( - [inputs, filters], - kernel_shape=self.ksize, - strides=self.dg_stride, - padding=self.padding, - dilations=self.dg_dilation, - data_format=self.data_format, - ) - return outputs - - -def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None): - """ - Computes a 2-D convolution given 4-D input and filters tensors. - - Parameters - ---------- - input : tensor - Must be one of the following types: half, bfloat16, float32, float64. A 4-D tensor. - The dimension order is interpreted according to the value of data_format, see below for details. - filters : tensor - Must have the same type as input. A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels] - strides : int of list - The stride of the sliding window for each dimension of input. If a single value is given it is replicated in the H and W dimension. - By default the N and C dimensions are set to 1. The dimension order is determined by the value of data_format, see below for details. - padding : string - "SAME" or "VALID" - data_format : string - "NHWC", "NCHW". Defaults to "NCHW". - dilations : list or ints - list of ints that has length 1, 2 or 4, defaults to 1. The dilation factor for each dimension ofinput. - - Returns - ------- - A Tensor. Has the same type as input. 
- """ - raise NotImplementedError - - -class Conv3D(object): - pass - # raise NotImplementedError - - -def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None, name=None): - """ - Computes a 3-D convolution given 5-D input and filters tensors. - - Parameters - ---------- - input : tensor - Must be one of the following types: half, bfloat16, float32, float64. - Shape [batch, in_depth, in_height, in_width, in_channels]. - filters : tensor - Must have the same type as input. Shape [filter_depth, filter_height, filter_width, in_channels, out_channels]. - in_channels must match between input and filters. - strides : list of ints - A list of ints that has length >= 5. 1-D tensor of length 5. - The stride of the sliding window for each dimension of input. - Must have strides[0] = strides[4] = 1. - padding : string - A string from: "SAME", "VALID". The type of padding algorithm to use. - data_format : string - An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. - With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. - Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. - dilations : list of ints - Defaults to [1, 1, 1, 1, 1]. 1-D tensor of length 5. The dilation factor for each dimension of input. - If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. - The dimension order is determined by the value of data_format, see above for details. - Dilations in the batch and depth dimensions must be 1. - name : string - A name for the operation (optional). - - Returns - ------- - A Tensor. Has the same type as input. - """ - - raise NotImplementedError - - -def lrn(inputs, depth_radius, bias, alpha, beta): - """ - Local Response Normalization. 
- - Parameters - ---------- - inputs : tensor - Must be one of the following types: half, bfloat16, float32. 4-D. - depth_radius : int - Defaults to 5. 0-D. Half-width of the 1-D normalization window. - bias : float - Defaults to 1. An offset (usually positive to avoid dividing by 0). - alpha : float - Defaults to 1. A scale factor, usually positive. - beta : float - Defaults to 0.5. An exponent. - - Returns - ------- - A Tensor. Has the same type as input. - """ - pass - - -def moments(x, axes, shift=None, keepdims=False): - """ - Calculates the mean and variance of x. - - Parameters - ---------- - x : tensor - A Tensor - axes : ints - Axes along which to compute mean and variance. - shift : int - Not used in the current implementation. - keepdims : bool - produce moments with the same dimensionality as the input. - - Returns - ------- - Two Tensor objects: mean and variance. - """ - - pass - - -class MaxPool(object): - - def __init__(self, ksize, strides, padding, data_format=None): - self.data_format, self.padding = preprocess_2d_format(data_format, padding) - self.ksize = ksize - self.strides = strides - - def __call__(self, inputs): - return vision_ops.pool2d( - inputs, - kernel_shape=self.ksize, - strides=self.strides, - padding=self.padding, - mode='MAX', - global_pooling=False, - data_format=self.data_format, - ) - - -def max_pool(input, ksize, strides, padding, data_format=None): - """ - Performs the max pooling on the input. - - Parameters - ---------- - input : tensor - Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] if data_format does not start - with "NC" (default), or [batch_size, num_channels] + input_spatial_shape if data_format starts with "NC". - Pooling happens over the spatial dimensions only. - ksize : int or list of ints - An int or list of ints that has length 1, N or N+2. - The size of the window for each dimension of the input tensor. 
- strides : int or list of ints - An int or list of ints that has length 1, N or N+2. - The stride of the sliding window for each dimension of the input tensor. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - - Returns - ------- - A Tensor of format specified by data_format. The max pooled output tensor. - """ - pass - - -class AvgPool(object): - - def __init__(self, ksize, strides, padding, data_format=None): - self.data_format, self.padding = preprocess_2d_format(data_format, padding) - self.filter_size = ksize - self.strides = strides - - def __call__(self, inputs): - return vision_ops.pool2d( - inputs, - kernel_shape=self.filter_size, - strides=self.strides, - padding=self.padding, - mode='AVG', - global_pooling=False, - data_format=self.data_format, - ) - - -def avg_pool(input, ksize, strides, padding): - """ - Performs the avg pooling on the input. - - Parameters - ---------- - input : tensor - Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] - if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape - if data_format starts with "NC". Pooling happens over the spatial dimensions only. - ksize : int or list of ints - An int or list of ints that has length 1, N or N+2. - The size of the window for each dimension of the input tensor. - strides : int or list of ints - An int or list of ints that has length 1, N or N+2. - The stride of the sliding window for each dimension of the input tensor. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - - Returns - ------- - A Tensor of format specified by data_format. The average pooled output tensor. - """ - pass - - -def max_pool3d(input, ksize, strides, padding, data_format=None, name=None): - """ - Performs the max pooling on the input. 
- - Parameters - ---------- - input : tensor - A 5-D Tensor of the format specified by data_format. - ksize : int or list of ints - An int or list of ints that has length 1, 3 or 5. - The size of the window for each dimension of the input tensor. - strides : int or list of ints - An int or list of ints that has length 1, 3 or 5. - The stride of the sliding window for each dimension of the input tensor. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - data_format : string - "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data. - With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. - Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. - name : string - A name for the operation (optional). - - Returns - ------- - A Tensor of format specified by data_format. The max pooled output tensor. - """ - pass - - -def avg_pool3d(input, ksize, strides, padding, data_format=None, name=None): - """ - Performs the average pooling on the input. - - Parameters - ---------- - input : tensor - A 5-D Tensor of shape [batch, height, width, channels] and type float32, float64, qint8, quint8, or qint32. - ksize : int or list of ints - An int or list of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor. - strides : int or list of ints - An int or list of ints that has length 1, 3 or 5. - The stride of the sliding window for each dimension of the input tensor. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - data_format : string - 'NDHWC' and 'NCDHW' are supported. - name : string - Optional name for the operation. - - Returns - ------- - A Tensor with the same type as value. The average pooled output tensor. 
- """ - pass - - -def pool(input, window_shape, pooling_type, strides=None, padding='VALID', data_format=None, dilations=None, name=None): - """ - Performs an N-D pooling operation. - - Parameters - ---------- - input : tensor - Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] - if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape - if data_format starts with "NC". Pooling happens over the spatial dimensions only. - window_shape : int - Sequence of N ints >= 1. - pooling_type : string - Specifies pooling operation, must be "AVG" or "MAX". - strides : ints - Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. - padding : string - The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME". - See the "returns" section of tf.ops.convolution for details. - data_format : string - Specifies whether the channel dimension of the input and output is the last dimension (default, or if data_format does not start with "NC"), - or the second dimension (if data_format starts with "NC"). - For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". - For N=3, the valid values are "NDHWC" (default) and "NCDHW". - dilations : list of ints - Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. - name : string - Optional. Name of the op. 
- - Returns - ------- - Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] - """ - pass - - -class DepthwiseConv2d(object): - - def __init__(self, strides, padding, data_format=None, dilations=None, ksize=None, channel_multiplier=1): - self.data_format, self.padding = preprocess_2d_format(data_format, padding) - self.stride = strides - self.dilations = dilations - - def __call__(self, input, filter): - raise NotImplementedError("Not implemented depthwiseconv2d") - - -def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilations=None, name=None): - """ - Depthwise 2-D convolution. - - Parameters - ---------- - input : tensor - 4-D with shape according to data_format. - filter : tensor - 4-D with shape [filter_height, filter_width, in_channels, channel_multiplier]. - strides : list - 1-D of size 4. The stride of the sliding window for each dimension of input. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - data_format : string - The data format for input. Either "NHWC" (default) or "NCHW". - dilations : list - 1-D of size 2. The dilation rate in which we sample input values across the height and width dimensions in atrous convolution. - If it is greater than 1, then all values of strides must be 1. - name : string - A name for this operation (optional). - - Returns - ------- - A 4-D Tensor with shape according to data_format. - E.g., for "NHWC" format, shape is [batch, out_height, out_width, in_channels * channel_multiplier]. - """ - - pass - - -def conv1d_transpose( - input, filters, output_shape, strides, padding='SAME', data_format='NWC', dilations=None, name=None -): - """ - The transpose of conv1d. - - Parameters - ---------- - input : tensor - A 3-D Tensor of type float and shape [batch, in_width, in_channels] - for NWC data format or [batch, in_channels, in_width] for NCW data format. 
- filters : tensor - A 3-D Tensor with the same type as value and shape [filter_width, output_channels, in_channels]. - filter's in_channels dimension must match that of value. - output_shape : tensor - A 1-D Tensor, containing three elements, representing the output shape of the deconvolution op. - strides : list - An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - data_format : string - 'NWC' and 'NCW' are supported. - dilations : list - An int or list of ints that has length 1 or 3 which defaults to 1. - The dilation factor for each dimension of input. If set to k > 1, - there will be k-1 skipped cells between each filter element on that dimension. - Dilations in the batch and depth dimensions must be 1. - name : string - Optional name for the returned tensor. - - Returns - ------- - A Tensor with the same type as value. - """ - pass - - -def conv2d_transpose( - input, filters, output_shape, strides, padding='SAME', data_format='NHWC', dilations=None, name=None -): - """ - The transpose of conv2d. - - Parameters - ---------- - input : tensor - A 4-D Tensor of type float and shape [batch, height, width, in_channels] - for NHWC data format or [batch, in_channels, height, width] for NCHW data format. - filters : tensor - A 4-D Tensor with the same type as input and shape [height, width, - output_channels, in_channels]. filter's in_channels dimension must match that of input. - output_shape : tensor - A 1-D Tensor representing the output shape of the deconvolution op. - strides : list - An int or list of ints that has length 1, 2 or 4. The stride of the sliding window for each dimension of input. - If a single value is given it is replicated in the H and W dimension. - By default the N and C dimensions are set to 0. 
- The dimension order is determined by the value of data_format, see below for details. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - data_format : string - 'NHWC' and 'NCHW' are supported. - dilations : list - An int or list of ints that has length 1, 2 or 4, defaults to 1. - name : string - Optional name for the returned tensor. - - Returns - ------- - A Tensor with the same type as input. - """ - pass - - -def conv3d_transpose( - input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None -): - """ - The transpose of conv3d. - - Parameters - ---------- - input : tensor - A 5-D Tensor of type float and shape [batch, height, width, in_channels] for - NHWC data format or [batch, in_channels, height, width] for NCHW data format. - filters : tensor - A 5-D Tensor with the same type as value and shape [height, width, output_channels, in_channels]. - filter's in_channels dimension must match that of value. - output_shape : tensor - A 1-D Tensor representing the output shape of the deconvolution op. - strides : list - An int or list of ints that has length 1, 3 or 5. - padding : string - 'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details. - data_format : string - 'NDHWC' and 'NCDHW' are supported. - dilations : list of ints - An int or list of ints that has length 1, 3 or 5, defaults to 1. - name : string - Optional name for the returned tensor. - - Returns - ------- - A Tensor with the same type as value. 
- """ - - pass - - -class BatchNorm(object): - - def __init__(self): - pass - - def __call__(self, *args, **kwargs): - pass diff --git a/tensorlayer/backend/ops/load_backend.py b/tensorlayer/backend/ops/load_backend.py index 4343507..5b5be55 100644 --- a/tensorlayer/backend/ops/load_backend.py +++ b/tensorlayer/backend/ops/load_backend.py @@ -7,7 +7,7 @@ import sys BACKEND = 'tensorflow' # BACKEND = 'mindspore' -# BACKEND = 'dragon' +# BACKEND = 'paddle' # Check for backend.json files tl_backend_dir = os.path.expanduser('~') @@ -34,7 +34,7 @@ else: else: BACKEND = load_dict['backend'] -# Set backend based on TL_BACKEND flag. +# Set backend based on TL_BACKEND. if 'TL_BACKEND' in os.environ: backend = os.environ['TL_BACKEND'] if backend: @@ -57,20 +57,13 @@ elif BACKEND == 'mindspore': import mindspore.context as context import os os.environ['DEVICE_ID'] = '0' - #context.set_context(mode=context.PYNATIVE_MODE,device_target='GPU'), - context.set_context(mode=context.GRAPH_MODE, device_target='CPU'), + context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU'), + # context.set_context(mode=context.GRAPH_MODE, device_target='CPU'), # enable_task_sink=True, enable_loop_sink=True) # context.set_context(mode=context.GRAPH_MODE, backend_policy='ms', # device_target='Ascend', enable_task_sink=True, enable_loop_sink=True) sys.stderr.write('Using MindSpore backend.\n') -elif BACKEND == 'dragon': - from .dragon_backend import * - from .dragon_nn import * - import dragon as dg - BACKEND_VERSION = dg.__version__ - sys.stderr.write('Using Dragon backend.\n') - elif BACKEND == 'paddle': from .paddle_backend import * from .paddle_nn import * diff --git a/tensorlayer/backend/ops/mindspore_backend.py b/tensorlayer/backend/ops/mindspore_backend.py index b602a4b..5e9c9f1 100644 --- a/tensorlayer/backend/ops/mindspore_backend.py +++ b/tensorlayer/backend/ops/mindspore_backend.py @@ -11,12 +11,13 @@ from mindspore.common.initializer import ( initializer, Constant, Normal, 
TruncatedNormal, Initializer, _assignment, _calculate_in_and_out, One, Zero ) from mindspore.common.tensor import Tensor -from mindspore._c_expression import Tensor as Tensor_ from mindspore.ops import operations as P from mindspore.ops import functional as F from mindspore.ops import composite as C import mindspore.context as context from mindspore.nn import Cell +from mindspore.ops import count_nonzero +import mindspore.numpy as msnp import numpy as np from scipy.stats import truncnorm @@ -588,6 +589,10 @@ def convert_to_tensor(value, dtype=None): return Tensor(value, dtype=dtype) +def convert_to_numpy(value): + return value.asnumpy() + + def sqrt(x): """ Computes square root of x element-wise. @@ -611,7 +616,7 @@ class ReduceSum(Cell): def __init__(self, axis): super(ReduceSum, self).__init__() self.axis = axis - self.reduce_sum = P.ReduceSum(keep_dims=True) + self.reduce_sum = P.ReduceSum(keep_dims=False) def construct(self, input): return self.reduce_sum(input, self.axis) @@ -919,7 +924,7 @@ class Cast(Cell): self.cast = P.Cast() def construct(self, input): - return self.cast(input, dtype=self.dtype) + return self.cast(input, self.dtype) def cast(x, dtype): @@ -1047,6 +1052,12 @@ def split(value, num_or_size_splits, axis=0, num=None): pass +class Floor(Cell): + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + def floor(x): return NotImplementedError @@ -1087,44 +1098,79 @@ class L2Normalize(Cell): super(L2Normalize, self).__init__() pass - def __call__(self, input, *args, **kwargs): + def construct(self, input, *args, **kwargs): pass class EmbeddingLookup(Cell): - def __init__(self, max_norm=None): + def __init__(self, max_norm=0): + super(EmbeddingLookup, self).__init__() self.max_norm = max_norm + self.embedding_lookup = P.EmbeddingLookup() - def __call__(self, params, ids, *args, **kwargs): - pass + def construct(self, params, ids, *args, **kwargs): + return self.embedding_lookup(params, ids, self.max_norm) -class NCELoss(object): 
+class NCELoss(Cell): def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False): super(NCELoss, self).__init__() - - def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes): pass + def construct(self, weights, biases, labels, inputs, num_sampled, num_classes): + raise NotImplementedError + -class Not_equal(object): +class NotEqual(Cell): def __init__(self): - pass + super(NotEqual, self).__init__() + self.not_equal = P.NotEqual() - def __call__(self, x, y): - pass + def construct(self, x, y): + outputs = self.not_equal(x, y) + return outputs -class Count_nonzero(object): +class CountNonzero(object): def __init__(self, keepdims=None, dtype=int64): - pass + self.keepdims = keepdims + self.dtype = dtype - def __call__(self, *args, **kwargs): - pass + def __call__(self, input, axis=None): + input = self.convert_dtype(input) + return count_nonzero(x=input, axis=axis, keep_dims=self.keepdims, dtype=self.dtype) + + def bool_convert_to_tensor(self, x): + x = x.asnumpy() + shapes = x.shape + b = np.ones(shapes) + if len(shapes) == 1: + for i in range(shapes - 1): + if x[i] ==True: + b[i] = 1 + else: + b[i] = 0 + if len(shapes) == 2: + for i in range(shapes[0] - 1): + for j in range(shapes[1] - 1): + if x[i][j] ==True: + b[i][j] = 1 + else: + b[i][j] = 0 + return Tensor(b, dtype=float32) + + def convert_dtype(self, input): + if input.shape == 1 and type(input[0]) is bool: + output = self.bool_convert_to_tensor(input) + elif input.shape == 2 and type(input[0][0]) is bool: + output = self.bool_convert_to_tensor(input) + else: + output = input + return output class Resize(Cell): @@ -1208,6 +1254,16 @@ class Sign(Cell): return self.sign(x) +class Ceil(Cell): + + def __init__(self): + super(Ceil, self).__init__() + self.ceil = P.Ceil() + + def construct(self, x): + return self.ceil(x) + + def ceil(x): _ceil = P.Ceil() return _ceil(x) @@ -1218,7 +1274,7 @@ def multiply(x, y): def divide(x, y): - raise NotImplementedError + return 
msnp.divide(x, y) def identity(x): diff --git a/tensorlayer/backend/ops/mindspore_nn.py b/tensorlayer/backend/ops/mindspore_nn.py index 6e6619e..2d75604 100644 --- a/tensorlayer/backend/ops/mindspore_nn.py +++ b/tensorlayer/backend/ops/mindspore_nn.py @@ -1,18 +1,20 @@ #! /usr/bin/python # -*- coding: utf-8 -*- - from __future__ import absolute_import, division, print_function -from mindspore.nn.cell import Cell -from mindspore import context +import itertools import mindspore as ms import mindspore.ops as P +from mindspore import context +from mindspore.nn.cell import Cell +from mindspore._checkparam import Rel from mindspore.ops import functional as F -from mindspore.communication.management import get_group_size, get_rank from mindspore.communication import management -from mindspore._checkparam import check_int_positive +from mindspore.ops.operations import _inner_ops as inner from mindspore._extends import cell_attr_register from mindspore.ops._grad.grad_base import bprop_getters +from mindspore._checkparam import Validator as validator +from mindspore.communication.management import get_group_size, get_rank def padding_format(padding): @@ -537,25 +539,17 @@ class Conv2D(Cell): if self.data_format is 'NHWC': self.ms_stride = strides[1] self.ms_dilation = dilations[1] - # self.transpose = P.Transpose() elif self.data_format is 'NCHW': self.ms_stride = strides[2] self.ms_dilation = dilations[2] - # print(out_channel, k_size, self.padding, self.ms_stride, self.ms_dilation) self.conv2d = P.Conv2D( out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, - dilation=self.ms_dilation, mode=1, group=1 + dilation=self.ms_dilation, mode=1, group=1, data_format=self.data_format ) def construct(self, inputs, filters): - if self.data_format == 'NHWC': - inputs = nhwc_to_nchw(inputs) - outputs = self.conv2d(inputs, filters) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) return outputs @@ -588,8 +582,27 @@ def 
conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None) class Conv3D(Cell): - pass - # raise NotImplementedError + + def __init__(self, strides, padding, data_format='NDHWC', dilations=None, out_channel=None, k_size=None): + super(Conv3D, self).__init__() + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + + if self.data_format is 'NDHWC': + self.ms_stride = strides[1] + self.ms_dilation = dilations[1] + raise NotImplementedError("The optional value for data format. Currently only support “NCDHW”.") + elif self.data_format is 'NCDHW': + self.ms_stride = strides[2] + self.ms_dilation = dilations[2] + + self.conv3d = P.Conv3D( + out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, + dilation=self.ms_dilation, data_format=data_format + ) + + def construct(self, input, filters): + outputs = self.conv3d(input, filters) + return outputs def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None, name=None): @@ -677,23 +690,48 @@ def moments(x, axes, shift=None, keepdims=False): pass +class MaxPool1d(Cell): + + def __init__(self, ksize, strides, padding, data_format=None): + super(MaxPool1d, self).__init__() + self.data_format, padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.expand = P.ExpandDims() + _strides = (1, strides[0]) + _ksize = (1, ksize[0]) + if self.data_format == 'NWC': + self.squeeze = P.Squeeze(1) + _data_format = 'NHWC' + if self.data_format == 'NCW': + self.squeeze = P.Squeeze(2) + _data_format = 'NCHW' + + self.max_pool = P.MaxPool(kernel_size=_ksize, strides=_strides, pad_mode=padding, data_format=_data_format) + + def construct(self, inputs): + if self.data_format == 'NWC': + x = self.expand(inputs, 1) + if self.data_format == 'NCW': + x = self.expand(inputs, 2) + output = self.max_pool(x) + output = self.squeeze(output) + return output + + class MaxPool(Cell): def __init__(self, ksize, strides, padding, 
data_format=None): super(MaxPool, self).__init__() - self.data_format, self.padding = preprocess_2d_format(data_format=data_format, padding=padding) - ms_ksize = ksize[1] - ms_strides = strides[1] - self.maxpool = P.MaxPool(ksize=ms_ksize, strides=ms_strides, padding=self.padding) + data_format, padding = preprocess_2d_format(data_format=data_format, padding=padding) - def construct(self, inputs): - if self.data_format == 'NHWC': - inputs = nhwc_to_nchw(inputs) + if data_format == 'NHWC': + _strides = (strides[1], strides[2]) + if data_format == 'NCHW': + _strides = (strides[2], strides[3]) - outputs = self.maxpool(inputs) + self.maxpool = P.MaxPool(kernel_size=ksize, strides=_strides, pad_mode=padding, data_format=data_format) - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) + def construct(self, inputs): + outputs = self.maxpool(inputs) return outputs @@ -710,7 +748,7 @@ def max_pool(input, ksize, strides, padding, data_format=None): ksize : int or list of ints An int or list of ints that has length 1, N or N+2. The size of the window for each dimension of the input tensor. - strides : int or list of ints + strides : list or list of ints An int or list of ints that has length 1, N or N+2. The stride of the sliding window for each dimension of the input tensor. 
padding : string @@ -722,17 +760,54 @@ def max_pool(input, ksize, strides, padding, data_format=None): """ data_format, padding = preprocess_2d_format(data_format=data_format, padding=padding) if data_format == 'NHWC': - input = nhwc_to_nchw(input) - - ms_ksize = ksize[1] - ms_strides = strides[2] - outputs = P.MaxPool(ksize=ms_ksize, strides=ms_strides, padding=padding)(input) - # channel first to channel last - if data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) + _strides = (strides[1], strides[2]) + if data_format == 'NCHW': + _strides = (strides[2], strides[3]) + outputs = P.MaxPool(kernel_size=ksize, strides=_strides, pad_mode=padding, data_format=data_format)(input) return outputs +class AvgPool1d(Cell): + + def __init__(self, ksize, strides, padding, data_format=None): + super(AvgPool1d, self).__init__() + self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.kernel_size = (1, ksize[0]) + self.stride = (1, strides[0]) + + if self.data_format == 'NWC': + _data_format = 'NHWC' + self.squeeze = P.Squeeze(1) + if self.data_format == 'NCW': + _data_format = 'NCHW' + self.squeeze = P.Squeeze(2) + + self.avg_pool = P.AvgPool( + kernel_size=self.kernel_size, strides=self.stride, pad_mode=self.padding, data_format=_data_format + ) + self.reduce_mean = P.ReduceMean(keep_dims=True) + self.slice = P.Slice() + self.expand = P.ExpandDims() + self.shape = P.Shape() + + def construct(self, inputs): + x = inputs + batch, channel, width = self.shape(inputs) + if width == self.kernel_size[1]: + x = self.reduce_mean(x, 2) + elif width - self.kernel_size[1] < self.stride[1]: + x = self.slice(x, (0, 0, 0), (batch, channel, self.kernel_size[1])) + x = self.reduce_mean(x, 2) + else: + if self.data_format == 'NCW': + x = self.expand(x, 2) + if self.data_format == 'NWC': + x = self.expand(x, 1) + x = self.avg_pool(x) + x = self.squeeze(x) + return x + + class AvgPool(Cell): def __init__(self, ksize, strides, padding, 
data_format=None): @@ -740,16 +815,10 @@ class AvgPool(Cell): self.data_format, self.padding = preprocess_2d_format(data_format=data_format, padding=padding) ms_ksize = ksize[1] ms_strides = strides[1] - self.avgpool = P.AvgPool(ksize=ms_ksize, strides=ms_strides, padding=padding) + self.avgpool = P.AvgPool(ksize=ms_ksize, strides=ms_strides, padding=padding, data_format=self.data_format) def construct(self, inputs): - if self.data_format == 'NHWC': - inputs = nhwc_to_nchw(inputs) - outputs = self.avgpool(inputs) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) return outputs @@ -783,6 +852,24 @@ def avg_pool(input, ksize, strides, padding): return outputs(input) +class MaxPool3d(Cell): + + def __init__(self, ksize, strides, padding, data_format=None): + super(MaxPool3d, self).__init__() + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + if data_format == 'NDHWC': + _strides = (strides[1], strides[2], strides[3]) + if data_format == 'NCDHW': + _strides = (strides[2], strides[3], strides[4]) + self.max_pool3d = P.MaxPool3D( + kernel_size=ksize, strides=_strides, padding=padding, data_format=self.data_format + ) + + def __call__(self, inputs): + outputs = self.max_pool3d(inputs) + return outputs + + def max_pool3d(input, ksize, strides, padding, data_format=None, name=None): """ Performs the max pooling on the input. 
@@ -813,6 +900,21 @@ def max_pool3d(input, ksize, strides, padding, data_format=None, name=None): pass +class AvgPool3d(Cell): + + def __init__(self, ksize, strides, padding, data_format=None): + super(AvgPool3d, self).__init__() + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + if data_format == 'NDHWC': + _strides = (strides[1], strides[2], strides[3]) + if data_format == 'NCDHW': + _strides = (strides[2], strides[3], strides[4]) + raise NotImplementedError + + def __call__(self, inputs): + pass + + def avg_pool3d(input, ksize, strides, padding, data_format=None, name=None): """ Performs the average pooling on the input. @@ -929,22 +1031,27 @@ def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilation class Conv1d_transpose(Cell): - def __init__(self, strides, padding, data_format, dilations=None, out_channel=None, k_size=None, in_channels=None): + def __init__(self, stride, padding, data_format, dilations=None, out_channel=None, k_size=None, in_channels=None): super(Conv1d_transpose, self).__init__() self.data_format, self.padding = preprocess_1d_format(data_format, padding) self.in_channels = in_channels self.out_channel = out_channel - self.strides = (1, strides) + self.stride = (1, stride) self.dilations = (1, dilations) self.k_size = (1, k_size) - + if self.data_format == 'NWC': + self.data_format = 'NHWC' + self.h_axis = 1 + else: + self.data_format = 'NCHW' + self.h_axis = 2 self.conv2d_transpose = P.Conv2DBackpropInput( - out_channel=self.in_channels, kernel_size=self.k_size, pad_mode=self.padding, stride=self.strides, - dilation=self.dilations, mode=1, group=1 + out_channel=self.in_channels, kernel_size=self.k_size, pad_mode=self.padding, stride=self.stride, + dilation=self.dilations, mode=1, group=1, data_format=self.data_format ) self.shape = P.Shape() self.expand_dims = P.ExpandDims() - self.squeeze = P.Squeeze(2) + self.squeeze = P.Squeeze(self.h_axis) def _deconv_output_length(self, input_length, 
filter_size, stride_size, dilation_size): length = 0 @@ -958,19 +1065,22 @@ class Conv1d_transpose(Cell): return length def construct(self, x, filters): - if self.data_format == 'NWC': - x = nhwc_to_nchw(x) - x = self.expand_dims(x, 2) - filters = self.expand_dims(filters, 2) - n, _, h, w = self.shape(x) - h_out = self._deconv_output_length(h, self.k_size[0], self.strides[0], self.dilations[0]) - w_out = self._deconv_output_length(w, self.k_size[1], self.strides[1], self.dilations[1]) - output = self.conv2d_transpose(x, filters, (n, self.out_channel, h_out, w_out)) + x = self.expand_dims(x, self.h_axis) + filters = self.expand_dims(filters, self.h_axis) + if self.data_format == 'NCHW': + n, _, h, w = self.shape(x) + else: + n, h, w, _ = self.shape(x) + h_out = self._deconv_output_length(h, self.k_size[0], self.stride[0], self.dilations[0]) + w_out = self._deconv_output_length(w, self.k_size[1], self.stride[1], self.dilations[1]) + if self.data_format == 'NCHW': + output_size = (n, self.out_channel, h_out, w_out) + else: + output_size = (n, h_out, w_out, self.out_channel) + output = self.conv2d_transpose(x, filters, output_size) output = self.squeeze(output) - if self.data_format == 'NWC': - output = nchw_to_nhwc(output) return output @@ -1018,18 +1128,13 @@ class Conv2d_transpose(Cell): self.data_format, self.padding = preprocess_2d_format(data_format, padding) self.in_channels = in_channels self.out_channel = out_channel - self.k_size = k_size - if self.data_format == 'NHWC': - self.strides = (strides[1], strides[2]) - self.dilations = (dilations[1], dilations[2]) - elif self.data_format == 'NCHW': - self.strides = (strides[2], strides[3]) - self.dilations = (dilations[2], dilations[3]) + self.strides = strides + self.dilations = dilations self.conv2d_transpose = P.Conv2DBackpropInput( out_channel=self.in_channels, kernel_size=self.k_size, pad_mode=self.padding, stride=self.strides, - dilation=self.dilations, mode=1, group=1 + dilation=self.dilations, mode=1, 
group=1, data_format=self.data_format ) self.shape = P.Shape() @@ -1046,17 +1151,45 @@ class Conv2d_transpose(Cell): def construct(self, x, filters): if self.data_format == 'NHWC': - x = nhwc_to_nchw(x) - - n, _, h, w = self.shape(x) - - h_out = self._deconv_output_length(h, self.k_size[0], self.strides[0], self.dilations[0]) - w_out = self._deconv_output_length(w, self.k_size[1], self.strides[1], self.dilations[1]) - - output = self.conv2d_transpose(x, filters, (n, self.out_channel, h_out, w_out)) + h_axis, w_axis = 1, 2 + n, h, w, _ = self.shape(x) + else: + h_axis, w_axis = 2, 3 + n, _, h, w = self.shape(x) - if self.data_format == 'NHWC': - output = nchw_to_nhwc(x) + if isinstance(self.strides, int): + strides_h = self.strides + strides_w = self.strides + else: + strides_list = list(self.strides) + if len(strides_list) == 2: + strides_h = strides_list[0] + strides_w = strides_list[1] + elif len(strides_list) == 4: + strides_h = strides_list[h_axis] + strides_w = strides_list[w_axis] + + if self.dilations is not None: + if isinstance(self.dilations, int): + dilations_h = self.dilations + dilations_w = self.dilations + else: + dilations_list = list(self.dilations) + if len(dilations_list) == 2: + dilations_h = dilations_list[0] + dilations_w = dilations_list[1] + elif len(dilations_list) == 4: + dilations_h = dilations_list[h_axis] + dilations_w = dilations_list[w_axis] + + h_out = self._deconv_output_length(h, self.k_size[0], strides_h, dilations_h) + w_out = self._deconv_output_length(w, self.k_size[1], strides_w, dilations_w) + + if self.data_format == 'NCHW': + output_size = (n, self.out_channel, h_out, w_out) + else: + output_size = (n, h_out, w_out, self.out_channel) + output = self.conv2d_transpose(x, filters, output_size) return output @@ -1099,7 +1232,22 @@ def conv2d_transpose( class Conv3d_transpose(Cell): - pass + + def __init__( + self, strides, padding, data_format='NDHWC', dilations=None, name=None, out_channel=None, k_size=None, + in_channels=None 
+ ): + super(Conv3d_transpose, self).__init__() + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + + self.conv3d_transpose = P.Conv3DTranspose( + in_channel=in_channels, out_channel=out_channel, kernel_size=k_size, mode=1, pad_mode=self.padding, + stride=strides, dilation=dilations, data_format=self.data_format + ) + + def construct(self, input, filters): + output = self.conv3d_transpose(input, filters) + return output def conv3d_transpose( @@ -1143,16 +1291,22 @@ class BatchNorm(Cell): @cell_attr_register def __init__( self, num_features, epsilon=1e-5, decay=0.9, gamma=None, beta=None, moving_mean=None, moving_var=None, - is_train=None, device_num_each_group=1, data_format='channels_last' + is_train=None, device_num_each_group=1, process_groups=0, data_format='NCHW' ): super(BatchNorm, self).__init__() + if data_format in ["channels_last", "NHWC", "nhwc"]: + data_format = "NHWC" + elif data_format in ["channels_first", "NCHW", "nchw"]: + data_format = "NCHW" + validator.check_value_type('num_features', num_features, [int], self.cls_name) if num_features < 1: raise ValueError("num_features must be at least 1") if decay < 0 or decay > 1: raise ValueError("momentum should be a number in range [0, 1], but got {}".format(decay)) - - self.data_format = data_format + self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name) + if context.get_context("device_target") != "GPU" and self.format == "NHWC": + raise ValueError("NHWC format only support in GPU target.") self.use_batch_statistics = is_train self.num_features = num_features self.eps = epsilon @@ -1160,19 +1314,47 @@ class BatchNorm(Cell): self.moving_variance = moving_var self.gamma = gamma self.beta = beta - self.group = check_int_positive(device_num_each_group) + self.group_device_num = validator.check_positive_int(device_num_each_group) + self.process_groups = process_groups self.is_global = False - if self.group != 1: + self.parallel_mode = 
context.get_auto_parallel_context("parallel_mode") + global SYNC_BN_GROUP_NAME + # for GlobalBatchNorm + if self.group_device_num != 1: self.rank_id = get_rank() self.rank_size = get_group_size() self.device_list = [i for i in range(0, self.rank_size)] - self.rank_list = self.list_group(self.device_list, self.group) + self.rank_list = self.list_group(self.device_list, self.group_device_num) self.rank_list_idx = len(self.rank_list) for i in range(self.rank_list_idx): - if self.rank_id in self.rank_list[i] and self.group != 1: + if self.rank_id in self.rank_list[i]: self.is_global = True - management.create_group('group' + str(i), self.rank_list[i]) - self.all_reduce = P.AllReduce(P.ReduceOp.SUM, 'group' + str(i)).add_prim_attr('fusion', 1) + if SYNC_BN_GROUP_NAME == "": + SYNC_BN_GROUP_NAME = "sync_bn_group" + str(i) + management.create_group(SYNC_BN_GROUP_NAME, self.rank_list[i]) + # for SyncBatchNorm + if self.process_groups != 0: + self.rank_id = get_rank() + self.rank_size = get_group_size() + if self.process_groups is not None: + validator.check_isinstance("process_groups", self.process_groups, list) + self._check_rank_ids(self.process_groups, self.rank_size) + for i in range(len(self.process_groups)): + validator.check_isinstance("process_groups[" + str(i) + "]", self.process_groups[i], list) + self.group_device_num = len(self.process_groups[i]) + if self.rank_id in self.process_groups[i] and self.group_device_num > 1: + self.is_global = True + if SYNC_BN_GROUP_NAME == "": + SYNC_BN_GROUP_NAME = "sync_bn_group" + str(i) + management.create_group(SYNC_BN_GROUP_NAME, self.process_groups[i]) + elif self.rank_size > 1: + self.is_global = True + self.group_device_num = self.rank_size + self.device_list = [i for i in range(0, self.rank_size)] + if SYNC_BN_GROUP_NAME == "": + SYNC_BN_GROUP_NAME = "sync_bn_group0" + management.create_group(SYNC_BN_GROUP_NAME, self.device_list) + self.shape = P.Shape() self.reduce_mean = P.ReduceMean(keep_dims=True) self.square = 
P.Square() @@ -1180,8 +1362,7 @@ class BatchNorm(Cell): self.cast = P.Cast() self.dtype = P.DType() self.reshape = P.Reshape() - self.is_ascend = context.get_context("device_target") == "Ascend" - self.is_gpu = context.get_context("device_target") == "GPU" + self._target = context.get_context("device_target") self.is_graph_mode = context.get_context("mode") == context.GRAPH_MODE self.momentum = 1.0 - decay if context.get_context("enable_ge"): @@ -1189,16 +1370,13 @@ class BatchNorm(Cell): else: self.is_ge_backend = False - if self.is_graph_mode and (self.is_ge_backend or self.is_ascend): - self.bn_train = P.BatchNorm(is_training=True, epsilon=self.eps) - elif self.is_gpu: - self.bn_train = P.FusedBatchNormEx(mode=1, epsilon=self.eps, momentum=self.momentum) - else: - self.bn_train = P.FusedBatchNorm(mode=1, epsilon=self.eps, momentum=self.momentum) - self.bn_infer = P.BatchNorm(is_training=False, epsilon=self.eps) - self.enable_global_sync = self.is_global and (self.is_ge_backend or (self.is_graph_mode and self.is_ascend)) - self.enable_default_train = self.is_graph_mode and not self.is_global and \ - (self.is_ge_backend or self.is_ascend) + self.bn_train = P.BatchNorm(is_training=True, epsilon=self.eps, momentum=self.momentum, data_format=self.format) + if self.is_global: + self.bn_train = inner.SyncBatchNorm( + epsilon=self.eps, momentum=self.momentum, group=SYNC_BN_GROUP_NAME, device_num=self.group_device_num + ) + + self.bn_infer = P.BatchNorm(is_training=False, epsilon=self.eps, data_format=self.format) data_parallel_strategy = ((1, ), (1, )) data_parallel_strategy_one = ((1, ), ()) @@ -1209,9 +1387,6 @@ class BatchNorm(Cell): self.assign_sub_mean = P.AssignSub().shard(data_parallel_strategy) self.assign_sub_var = P.AssignSub().shard(data_parallel_strategy) - def _check_data_dim(self, x): - raise NotImplementedError - def list_group(self, world_rank, group_size): if group_size > get_group_size(): raise ValueError( @@ -1224,101 +1399,37 @@ class 
BatchNorm(Cell): group_list = [list(i) for i in world_rank_list] return group_list - def _global_sync(self, x, axes, re_shape): - """calculate global batch normalization output""" - x_mean = self.reduce_mean(x, axes) - x_mean_square = self.reduce_mean(self.square(x), axes) - global_batch_mean = self.all_reduce(x_mean) / self.group - global_batch_mean_square = self.all_reduce(x_mean_square) / self.group - global_mean = global_batch_mean - global_var = global_batch_mean_square - self.square(global_mean) - var_sqrt = self.sqrt(global_var + self.eps) - mean_first = (x - global_mean) / var_sqrt - y = mean_first * self.reshape(self.gamma, re_shape) + self.reshape(self.beta, re_shape) - - mean_sub = self.sub_mean(self.reshape(self.moving_mean, re_shape), global_mean) - tmp_mean = self.mul_mean(mean_sub, self.cast(self.momentum, self.dtype(mean_sub))) - mean_sub2 = self.sub_var(self.reshape(self.moving_mean, re_shape), global_var) - tmp_variance = self.mul_var(mean_sub2, self.cast(self.momentum, self.dtype(mean_sub2))) - y = F.depend(y, self.assign_sub_mean(self.moving_mean, self.reshape(tmp_mean, self.shape(self.moving_mean)))) - y = F.depend( - y, self.assign_sub_var(self.moving_variance, self.reshape(tmp_variance, self.shape(self.moving_variance))) - ) - return y - - def get_dim(self, input): - dim = len(self.shape(input)) - if dim == 2: - return '1d' - elif dim == 4: - return '2d' - else: - raise ValueError("The input must has 2 dims or 4 dims.") - - def _shape_check_bn(self, in_shape, in_dims): - dim = len(in_shape) - if in_dims == '1d' and dim != 2: - raise ValueError("The input must has 2 dims.") - if in_dims == '2d' and dim != 4: - raise ValueError("The input must has 4 dims.") - if in_dims == 'both' and dim != 2 and dim != 4: - raise ValueError("The input must has 2 dims or 4 dims.") - - def _shape_infer(self, x_shape, num_feature): - """global batch normalization shape and axes infer""" - if len(x_shape) == 4: - axes = (0, 2, 3) - re_shape = (1, num_feature, 1, 
1) - else: - axes = (0, ) - re_shape = (1, num_feature) - return axes, re_shape + def _check_rank_ids(self, process_groups, rank_size): + seen = set() + for rid in itertools.chain(*process_groups): + validator.check_int_range(rid, 0, rank_size, Rel.INC_LEFT, "rank id in process_groups") + if rid in seen: + raise ValueError("rank id in process_groups should not be duplicated.") + seen.add(rid) def construct(self, inputs): - x = inputs - self._shape_check_bn(self.shape(x), self.get_dim(x)) - if self.use_batch_statistics is None: - flag = self.training - else: - flag = self.use_batch_statistics + x_shape = F.shape(inputs) + if len(x_shape) == 5: + inputs = self.reshape(inputs, (x_shape[0], x_shape[1], x_shape[2] * x_shape[3], x_shape[4])) + + flag = self.use_batch_statistics if flag: - if self.enable_global_sync: - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - x = nhwc_to_nchw(x) - axes, re_shape = self._shape_infer(F.shape(x), self.num_features) - y = self._global_sync(x, axes, re_shape) - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - y = nchw_to_nhwc(y) - return y - - if self.enable_default_train: - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - x = nhwc_to_nchw(x) - y, batch_mean, batch_var, _, _ = self.bn_train(x, self.gamma, self.beta, None, None) - - mean_sub = self.sub_mean(self.moving_mean, batch_mean) - temp_mean = self.mul_mean(mean_sub, self.momentum) - mean_sub2 = self.sub_var(self.moving_variance, batch_var) - temp_variance = self.mul_var(mean_sub2, self.momentum) - y = F.depend(y, self.assign_sub_mean(self.moving_mean, temp_mean)) - y = F.depend(y, self.assign_sub_var(self.moving_variance, temp_variance)) - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - y = nchw_to_nhwc(y) - return y - - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - x = nhwc_to_nchw(x) - y = self.bn_train(x, self.gamma, self.beta, self.moving_mean, 
self.moving_variance)[0] - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - y = nchw_to_nhwc(y) - return y - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - x = nhwc_to_nchw(x) - y = self.bn_infer(x, self.gamma, self.beta, self.moving_mean, self.moving_variance)[0] - if self.data_format == 'channels_last' and self.get_dim(x) == '2d': - y = nchw_to_nhwc(y) - return y + output = self.bn_train(inputs, self.gamma, self.beta, self.moving_mean, self.moving_variance)[0] + + if len(x_shape) == 5: + output = self.reshape(output, x_shape) + return output + + output = self.bn_infer(inputs, self.gamma, self.beta, self.moving_mean, self.moving_variance)[0] + if len(x_shape) == 5: + output = self.reshape(output, x_shape) + return output + + def extend_repr(self): + return 'num_features={}, eps={}, momentum={}, gamma={}, beta={}, moving_mean={}, moving_variance={}'.format( + self.num_features, self.eps, self.momentum, self.gamma, self.beta, self.moving_mean, self.moving_variance + ) class GroupConv2D(Cell): @@ -1337,17 +1448,11 @@ class GroupConv2D(Cell): self.conv2d = P.Conv2D( out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, - dilation=self.ms_dilation, mode=1, group=groups + dilation=self.ms_dilation, mode=1, group=groups, data_format=self.data_format ) def construct(self, inputs, filters): - if self.data_format == 'NHWC': - inputs = nhwc_to_nchw(inputs) - outputs = self.conv2d(inputs, filters) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) return outputs @@ -1407,30 +1512,24 @@ class SeparableConv2D(Cell): if self.data_format is 'NHWC': self.ms_stride = strides[1] self.ms_dilation = dilations[1] - # self.transpose = P.Transpose() elif self.data_format is 'NCHW': self.ms_stride = strides[2] self.ms_dilation = dilations[2] self.depthwise_conv = P.Conv2D( out_channel=self.in_channel * self.depth_multiplier, kernel_size=self.k_size, pad_mode=self.padding, - 
stride=self.ms_stride, dilation=self.ms_dilation, mode=1, group=self.in_channel + stride=self.ms_stride, dilation=self.ms_dilation, mode=1, group=self.in_channel, + data_format=self.data_format ) self.pointwise_conv = P.Conv2D( out_channel=self.out_channel, kernel_size=(1, 1), pad_mode=self.padding, stride=(1, 1), dilation=(1, 1), - mode=1, group=1 + mode=1, group=1, data_format=self.data_format ) def construct(self, x, depthwise_filters, pointwise_filters): - if self.data_format == 'NHWC': - x = nhwc_to_nchw(x) - outputs = self.depthwise_conv(x, depthwise_filters) outputs = self.pointwise_conv(outputs, pointwise_filters) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) return outputs @@ -1440,26 +1539,27 @@ class AdaptiveMeanPool1D(Cell): super(AdaptiveMeanPool1D, self).__init__() self.data_format, _ = preprocess_1d_format(data_format, None) self.output_size = output_size + if self.data_format == 'NWC': + self.data_format = 'NHWC' + self.h_axis = 1 + else: + self.data_format = 'NCHW' + self.h_axis = 2 self.expand_dims = P.ExpandDims() - self.squeeze = P.Squeeze(2) + self.squeeze = P.Squeeze(self.h_axis) + self.shape = P.Shape() def construct(self, inputs): - - if self.data_format == 'NWC': - n, w, c = inputs.shape - inputs = nhwc_to_nchw(inputs) + if self.data_format == 'NHWC': + n, w, c = self.shape(inputs) else: - n, c, w = inputs.shape - inputs = self.expand_dims(inputs, 2) - + n, c, w = self.shape(inputs) + inputs = self.expand_dims(inputs, self.h_axis) stride = (1, w // self.output_size) kernel = (1, w - (self.output_size - 1) * stride[1]) - outputs = P.AvgPool(kernel_size=kernel, strides=stride, pad_mode='VALID')(inputs) + outputs = P.AvgPool(kernel_size=kernel, strides=stride, pad_mode='VALID', data_format=self.data_format)(inputs) outputs = self.squeeze(outputs) - if self.data_format == 'NWC': - outputs = nchw_to_nhwc(outputs) - return outputs @@ -1469,31 +1569,38 @@ class AdaptiveMeanPool2D(Cell): super(AdaptiveMeanPool2D, 
self).__init__() self.data_format, _ = preprocess_2d_format(data_format, None) self.output_size = output_size + if self.data_format == 'NHWC': + self.h_axis = 1 + else: + self.h_axis = 2 + self.shape = P.Shape() def construct(self, inputs): - if self.data_format == 'NHWC': - n, h, w, c = inputs.shape - inputs = nhwc_to_nchw(inputs) + n, h, w, c = self.shape(inputs) else: - n, c, h, w = inputs.shape + n, c, h, w = self.shape(inputs) out_h, out_w = self.output_size stride_h = h // out_h kernel_h = h - (out_h - 1) * stride_h stride_w = w // out_w kernel_w = w - (out_w - 1) * stride_w - outputs = P.AvgPool(kernel_size=(kernel_h, kernel_w), strides=(stride_h, stride_w), pad_mode='VALID')(inputs) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) + outputs = P.AvgPool( + kernel_size=(kernel_h, kernel_w), strides=(stride_h, stride_w), pad_mode='VALID', + data_format=self.data_format + )(inputs) return outputs class AdaptiveMeanPool3D(Cell): - pass + def __init__(self, output_size, data_format): + pass + + def __call__(self, inputs): + raise NotImplementedError class AdaptiveMaxPool1D(Cell): @@ -1502,26 +1609,28 @@ class AdaptiveMaxPool1D(Cell): super(AdaptiveMaxPool1D, self).__init__() self.data_format, _ = preprocess_1d_format(data_format, None) self.output_size = output_size + if self.data_format == 'NWC': + self.data_format = 'NHWC' + self.h_axis = 1 + else: + self.data_format = 'NCHW' + self.h_axis = 2 self.expand_dims = P.ExpandDims() - self.squeeze = P.Squeeze(2) + self.squeeze = P.Squeeze(self.h_axis) + self.shape = P.Shape() def construct(self, inputs): - if self.data_format == 'NWC': - n, w, c = inputs.shape - inputs = nhwc_to_nchw(inputs) + if self.data_format == 'NHWC': + n, w, c = self.shape(inputs) else: - n, c, w = inputs.shape - inputs = self.expand_dims(inputs, 2) - + n, c, w = self.shape(inputs) + inputs = self.expand_dims(inputs, self.h_axis) stride = (1, w // self.output_size) kernel = (1, w - (self.output_size - 1) * stride[1]) - 
outputs = P.MaxPool(kernel_size=kernel, strides=stride, pad_mode='VALID')(inputs) + outputs = P.MaxPool(kernel_size=kernel, strides=stride, pad_mode='VALID', data_format=self.data_format)(inputs) outputs = self.squeeze(outputs) - if self.data_format == 'NWC': - outputs = nchw_to_nhwc(outputs) - return outputs @@ -1531,31 +1640,37 @@ class AdaptiveMaxPool2D(Cell): super(AdaptiveMaxPool2D, self).__init__() self.data_format, _ = preprocess_2d_format(data_format, None) self.output_size = output_size + if self.data_format == 'NHWC': + self.h_axis = 1 + else: + self.h_axis = 2 + self.shape = P.Shape() def construct(self, inputs): - if self.data_format == 'NHWC': - n, h, w, c = inputs.shape - inputs = nhwc_to_nchw(inputs) + n, h, w, c = self.shape(inputs) else: - n, c, h, w = inputs.shape - + n, c, h, w = self.shape(inputs) out_h, out_w = self.output_size stride_h = h // out_h kernel_h = h - (out_h - 1) * stride_h stride_w = w // out_w kernel_w = w - (out_w - 1) * stride_w - outputs = P.MaxPool(kernel_size=(kernel_h, kernel_w), strides=(stride_h, stride_w), pad_mode='VALID')(inputs) - - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) + outputs = P.MaxPool( + kernel_size=(kernel_h, kernel_w), strides=(stride_h, stride_w), pad_mode='VALID', + data_format=self.data_format + )(inputs) return outputs class AdaptiveMaxPool3D(Cell): - pass + def __init__(self, output_size, data_format): + pass + + def __call__(self, inputs): + raise NotImplementedError class BinaryConv2D(Cell): @@ -1566,14 +1681,13 @@ class BinaryConv2D(Cell): if self.data_format is 'NHWC': self.ms_stride = strides[1] self.ms_dilation = dilations[1] - # self.transpose = P.Transpose() elif self.data_format is 'NCHW': self.ms_stride = strides[2] self.ms_dilation = dilations[2] self.conv2d = P.Conv2D( out_channel=out_channel, kernel_size=k_size, pad_mode=self.padding, stride=self.ms_stride, - dilation=self.ms_dilation, mode=1, group=1 + dilation=self.ms_dilation, mode=1, group=1, 
data_format=self.data_format ) @bprop_getters.register(P.Sign) @@ -1590,16 +1704,9 @@ class BinaryConv2D(Cell): def construct(self, inputs, filters): - if self.data_format == 'NHWC': - inputs = nhwc_to_nchw(inputs) - filters = self.sign(filters) - outputs = self.conv2d(inputs, filters) - if self.data_format == 'NHWC': - outputs = nchw_to_nhwc(outputs) - return outputs diff --git a/tensorlayer/backend/ops/paddle_backend.py b/tensorlayer/backend/ops/paddle_backend.py index f7334c0..3573bd2 100644 --- a/tensorlayer/backend/ops/paddle_backend.py +++ b/tensorlayer/backend/ops/paddle_backend.py @@ -342,7 +342,7 @@ def reshape(tensor, shape): ------- A Tensor. Has the same type as tensor """ - raise NotImplementedError + return pd.reshape(tensor, shape) class Concat(object): @@ -372,7 +372,7 @@ def concat(values, axis): raise NotImplementedError -def convert_to_tensor(value, dtype=None): +def convert_to_tensor(value, dtype=float32): """ Converts the given value to a Tensor. @@ -387,7 +387,11 @@ def convert_to_tensor(value, dtype=None): ------- A Tensor based on value. 
""" - raise NotImplementedError + return pd.to_tensor(value, dtype=dtype) + + +def convert_to_numpy(value): + return value.numpy() def sqrt(x): @@ -418,15 +422,10 @@ class ReduceSum(object): class ReduceMean(object): def __init__(self, axis): - if axis == [1, 2]: - self.data_format = 'NHWC' - elif axis == [2, 3]: - self.data_format = 'NCHW' - else: - raise ("`data_format` should have one of the following values: [`channels_last`, `channels_first`]") + self.axis = axis def __call__(self, inputs): - raise NotImplementedError + return pd.mean(inputs, axis=self.axis) def reduce_mean(input_tensor, axis=None): @@ -454,15 +453,10 @@ def reduce_mean(input_tensor, axis=None): class ReduceMax(object): def __init__(self, axis): - if axis == [1, 2]: - self.data_format = 'NHWC' - elif axis == [2, 3]: - self.data_format = 'NCHW' - else: - raise ("`data_format` should have one of the following values: [`channels_last`, `channels_first`]") + self.axis = axis def __call__(self, inputs): - raise NotImplementedError + return pd.max(inputs, axis=self.axis) def reduce_max(input_tensor, axis=None): @@ -817,6 +811,12 @@ def split(value, num_or_size_splits, axis=0, num=None): pass +class Floor(object): + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + def floor(x): raise NotImplementedError @@ -875,7 +875,7 @@ class NCELoss(object): pass -class Not_equal(object): +class NotEqual(object): def __init__(self): pass @@ -884,7 +884,7 @@ class Not_equal(object): pass -class Count_nonzero(object): +class CountNonzero(object): def __init__(self, keepdims=None, dtype="int64"): pass @@ -950,6 +950,12 @@ class Sign(object): raise NotImplementedError +class Ceil(object): + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + def ceil(x): raise NotImplementedError diff --git a/tensorlayer/backend/ops/paddle_nn.py b/tensorlayer/backend/ops/paddle_nn.py index 535b9fa..a2fe790 100644 --- a/tensorlayer/backend/ops/paddle_nn.py +++ 
b/tensorlayer/backend/ops/paddle_nn.py @@ -46,10 +46,10 @@ def preprocess_1d_format(data_format, padding): str "NWC" or "NCW" and "SAME" or "VALID" """ - if data_format in ["channels_last", "NWC"]: - data_format = "NWC" - elif data_format in ["channels_first", "NCW"]: - data_format = "NCW" + if data_format in ["channels_last", "NWC", "NLC"]: + data_format = "NLC" + elif data_format in ["channels_first", "NCW", "NCL"]: + data_format = "NCL" elif data_format == None: data_format = None else: @@ -128,7 +128,15 @@ def nchw_to_nhwc(x): channels last tensor data """ - pass + if len(x.shape) == 3: + x = pd.transpose(x, (0, 2, 1)) + elif len(x.shape) == 4: + x = pd.transpose(x, (0, 2, 3, 1)) + elif len(x.shape) == 5: + x = pd.transpose(x, (0, 2, 3, 4, 1)) + else: + raise Exception("Unsupported dimensions") + return x def nhwc_to_nchw(x): @@ -145,7 +153,15 @@ def nhwc_to_nchw(x): channels first tensor data """ - pass + if len(x.shape) == 3: + x = pd.transpose(x, (0, 2, 1)) + elif len(x.shape) == 4: + x = pd.transpose(x, (0, 3, 1, 2)) + elif len(x.shape) == 5: + x = pd.transpose(x, (0, 4, 1, 2, 3)) + else: + raise Exception("Unsupported dimensions") + return x class ReLU(object): @@ -338,7 +354,8 @@ class Dropout(object): self.seed = seed def __call__(self, inputs): - raise NotImplementedError + output = F.dropout(inputs, p=self.keep, mode='upscale_in_train') + return output class BiasAdd(object): @@ -357,11 +374,22 @@ class BiasAdd(object): A Tensor with the same type as value. 
""" - def __init__(self, data_format='NHWC'): - self.data_format = data_format + def __init__(self, data_format='channels_last'): + super(BiasAdd, self).__init__() + if data_format in ['channels_first', 'NCL', 'NCHW', 'NCDHW']: + self.data_format = 'channels_first' + elif data_format in ['channels_last', 'NLC', 'NHWC', 'NDHWC']: + self.data_format = 'channels_last' + else: + raise ("Unsupported data format: " + str(data_format)) def __call__(self, x, bias): - return pd.add(x, bias) + if len(x.shape) > 2 and self.data_format == 'channels_first': + x = nchw_to_nhwc(x) + outputs = pd.add(x, bias) + if len(x.shape) > 2 and self.data_format == 'channels_first': + outputs = nhwc_to_nchw(outputs) + return outputs def bias_add(x, bias): @@ -383,12 +411,26 @@ def bias_add(x, bias): ------- A Tensor with the same type as value. """ - raise NotImplementedError + + #TODO the bias_add only supports channels_last + outputs = pd.add(x, bias) + return outputs class Conv1D(object): - pass - # raise NotImplementedError + + def __init__(self, stride, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None): + super(Conv1D, self).__init__() + self.data_format, self.padding = preprocess_1d_format(padding=padding, data_format=data_format) + self.stride = stride + self.dilations = dilations + + def __call__(self, input, filters): + output = F.conv1d( + x=input, weight=filters, stride=self.stride, dilation=self.dilations, data_format=self.data_format, + padding=self.padding + ) + return output def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, name=None): @@ -420,23 +462,29 @@ def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, n A Tensor. Has the same type as input. 
""" - pass + outputs = F.conv1d( + x=input, weight=filters, stride=stride, padding=padding, data_format=data_format, dilation=dilations, name=name + ) + return outputs class Conv2D(object): def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None): self.data_format, self.padding = preprocess_2d_format(data_format, padding) - self.ksize = k_size[0] if self.data_format is 'NHWC': - self.dg_stride = strides[1] - self.dg_dilation = dilations[1] + self._stride = (strides[1], strides[2]) + self._dilation = (dilations[1], dilations[2]) elif self.data_format is 'NCHW': - self.dg_stride = strides[2] - self.dg_dilation = dilations[2] + self._stride = (strides[2], strides[3]) + self._dilation = (dilations[2], dilations[3]) def __call__(self, inputs, filters): - raise NotImplementedError + outputs = F.conv2d( + x=inputs, weight=filters, stride=self._stride, dilation=self._dilation, padding=self.padding, + data_format=self.data_format + ) + return outputs def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None): @@ -464,12 +512,36 @@ def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None) ------- A Tensor. Has the same type as input. 
""" - raise NotImplementedError + data_format, padding = preprocess_2d_format(data_format, padding) + if data_format is 'NHWC': + _stride = (strides[1], strides[2]) + _dilation = (dilations[1], dilations[2]) + elif data_format is 'NCHW': + _stride = (strides[2], strides[3]) + _dilation = (dilations[2], dilations[3]) + outputs = F.conv2d( + x=input, weight=filters, stride=_stride, dilation=_dilation, padding=padding, data_format=data_format + ) + return outputs class Conv3D(object): - pass - # raise NotImplementedError + + def __init__(self, strides, padding, data_format='NDHWC', dilations=None, out_channel=None, k_size=None): + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + if data_format is 'NDHWC': + self._strides = (strides[1], strides[2], strides[3]) + self._dilations = (dilations[1], dilations[2], dilations[3]) + elif data_format is 'NCDHW': + self._strides = (strides[2], strides[3], strides[4]) + self._dilations = (dilations[2], dilations[3], dilations[4]) + + def __call__(self, input, filters): + outputs = F.conv3d( + x=input, weight=filters, stride=self._strides, dilation=self._dilations, data_format=self.data_format, + padding=self.padding + ) + return outputs def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None, name=None): @@ -484,7 +556,7 @@ def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None filters : tensor Must have the same type as input. Shape [filter_depth, filter_height, filter_width, in_channels, out_channels]. in_channels must match between input and filters. - strides : list of ints + strides : tuple of ints A list of ints that has length >= 5. 1-D tensor of length 5. The stride of the sliding window for each dimension of input. Must have strides[0] = strides[4] = 1. @@ -494,7 +566,7 @@ def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". 
The data format of the input and output data. With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. - dilations : list of ints + dilations : touple of ints Defaults to [1, 1, 1, 1, 1]. 1-D tensor of length 5. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of data_format, see above for details. @@ -506,8 +578,18 @@ def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None ------- A Tensor. Has the same type as input. """ - - raise NotImplementedError + data_format, padding = preprocess_3d_format(data_format, padding) + if data_format is 'NDHWC': + _strides = (strides[1], strides[2], strides[3]) + _dilations = (dilations[1], dilations[2], dilations[3]) + elif data_format is 'NCDHW': + _strides = (strides[2], strides[3], strides[4]) + _dilations = (dilations[2], dilations[3], dilations[4]) + outputs = F.conv3d( + x=input, weight=filters, stride=_strides, dilation=_dilations, data_format=data_format, padding=padding, + name=name + ) + return outputs def lrn(inputs, depth_radius, bias, alpha, beta): @@ -557,15 +639,37 @@ def moments(x, axes, shift=None, keepdims=False): pass +class MaxPool1d(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + if self.data_format == 'NLC': + inputs = nhwc_to_nchw(inputs) + outputs = F.max_pool1d(inputs, self.ksize, self.strides, self.padding) + if self.data_format == 'NLC': + outputs = nchw_to_nhwc(outputs) + return outputs + + class MaxPool(object): def __init__(self, ksize, strides, 
padding, data_format=None): self.data_format, self.padding = preprocess_2d_format(data_format, padding) self.ksize = ksize - self.strides = strides + if self.data_format is 'NHWC': + self._stride = (strides[1], strides[2]) + elif self.data_format is 'NCHW': + self._stride = (strides[2], strides[3]) def __call__(self, inputs): - raise NotImplementedError + outputs = F.max_pool2d( + x=inputs, kernel_size=self.ksize, stride=self._stride, padding=self.padding, data_format=self.data_format + ) + return outputs def max_pool(input, ksize, strides, padding, data_format=None): @@ -594,15 +698,38 @@ def max_pool(input, ksize, strides, padding, data_format=None): pass +class AvgPool1d(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + if self.data_format == 'NLC': + inputs = nhwc_to_nchw(inputs) + outputs = F.avg_pool1d(inputs, self.ksize, self.strides, self.padding) + if self.data_format == 'NLC': + outputs = nchw_to_nhwc(outputs) + return outputs + + class AvgPool(object): def __init__(self, ksize, strides, padding, data_format=None): self.data_format, self.padding = preprocess_2d_format(data_format, padding) self.filter_size = ksize - self.strides = strides + if self.data_format is 'NHWC': + self._stride = (strides[1], strides[2]) + elif self.data_format is 'NCHW': + self._stride = (strides[2], strides[3]) def __call__(self, inputs): - raise NotImplementedError + outputs = F.avg_pool2d( + inputs, kernel_size=self.filter_size, stride=self._stride, padding=self.padding, + data_format=self.data_format + ) + return outputs def avg_pool(input, ksize, strides, padding): @@ -631,6 +758,23 @@ def avg_pool(input, ksize, strides, padding): pass +class MaxPool3d(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = 
preprocess_3d_format(data_format, padding) + self.ksize = ksize + if self.data_format == 'NCDHW': + self.strides = (strides[2], strides[3], strides[4]) + if self.data_format == 'NDHWC': + self.strides = (strides[1], strides[2], strides[3]) + + def __call__(self, inputs): + outputs = F.max_pool3d( + inputs, kernel_size=self.ksize, stride=self.strides, padding=self.padding, data_format=self.data_format + ) + return outputs + + def max_pool3d(input, ksize, strides, padding, data_format=None, name=None): """ Performs the max pooling on the input. @@ -661,6 +805,23 @@ def max_pool3d(input, ksize, strides, padding, data_format=None, name=None): pass +class AvgPool3d(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + self.ksize = ksize + if self.data_format == 'NCDHW': + self.strides = (strides[2], strides[3], strides[4]) + if self.data_format == 'NDHWC': + self.strides = (strides[1], strides[2], strides[3]) + + def __call__(self, inputs): + outputs = F.avg_pool3d( + inputs, kernel_size=self.ksize, stride=self.strides, padding=self.padding, data_format=self.data_format + ) + return outputs + + def avg_pool3d(input, ksize, strides, padding, data_format=None, name=None): """ Performs the average pooling on the input. 
@@ -769,18 +930,26 @@ def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilation class Conv1d_transpose(object): def __init__( - self, strides, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None, in_channels=None + self, stride, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None, in_channels=None ): - self.strides = strides + self.stride = stride self.dilations = dilations self.data_format, self.padding = preprocess_1d_format(data_format, padding) def __call__(self, input, filters): - raise NotImplementedError + out = F.conv1d_transpose( + x=input, + weight=filters, + padding=self.padding, + stride=self.stride, + dilation=self.dilations, + data_format=self.data_format, + ) + return out def conv1d_transpose( - input, filters, output_shape, strides, padding='SAME', data_format='NWC', dilations=None, name=None + input, filters, output_shape, stride, padding='SAME', data_format='NWC', dilations=None, name=None ): """ The transpose of conv1d. @@ -813,7 +982,17 @@ def conv1d_transpose( ------- A Tensor with the same type as value. """ - pass + data_format, padding = preprocess_1d_format(data_format, padding) + output = F.conv1d_transpose( + x=input, + weight=filters, + stride=stride, + padding=padding, + dilation=dilations, + data_format=data_format, + output_size=output_shape, + ) + return output class Conv2d_transpose(object): @@ -824,11 +1003,14 @@ class Conv2d_transpose(object): ): self.strides = strides self.dilations = dilations - self.name = name self.data_format, self.padding = preprocess_2d_format(data_format, padding) def __call__(self, input, filters): - raise NotImplementedError + output = F.conv2d_transpose( + x=input, weight=filters, stride=self.strides, padding=self.padding, dilation=self.dilations, + data_format=self.data_format + ) + return output def conv2d_transpose( @@ -865,7 +1047,17 @@ def conv2d_transpose( ------- A Tensor with the same type as input. 
""" - pass + data_format, padding = preprocess_2d_format(data_format, padding) + output = F.conv2d_transpose( + x=input, + weight=filters, + output_size=output_shape, + stride=strides, + padding=padding, + dilation=dilations, + data_format=data_format, + ) + return output class Conv3d_transpose(object): @@ -876,12 +1068,14 @@ class Conv3d_transpose(object): ): self.strides = strides self.dilations = dilations - self.name = name - self.out_channel = out_channel self.data_format, self.padding = preprocess_3d_format(data_format, padding) def __call__(self, input, filters): - raise NotImplementedError + + output = F.conv3d_transpose( + x=input, weight=filters, stride=self.strides, padding=self.padding, dilation=self.dilations, + data_format=self.data_format + ) def conv3d_transpose( @@ -915,17 +1109,63 @@ def conv3d_transpose( ------- A Tensor with the same type as value. """ - - pass + data_format, padding = preprocess_3d_format(data_format, padding) + output = F.conv3d_transpose( + x=input, + weight=filters, + output_size=output_shape, + stride=strides, + padding=padding, + dilation=dilations, + data_format=data_format, + ) + return output class BatchNorm(object): - def __init__(self): - pass + def __init__( + self, decay=0.9, epsilon=0.00001, beta=None, gamma=None, moving_mean=None, moving_var=None, num_features=None, + data_format='channels_last', is_train=False + ): + self.decay = decay + self.epsilon = epsilon + self.data_format = data_format + self.beta = beta + self.gamma = gamma + self.moving_mean = moving_mean + self.moving_var = moving_var + self.num_features = num_features + self.is_train = is_train + self.axes = None - def __call__(self, *args, **kwargs): - raise NotImplementedError + def __call__(self, inputs): + data_format = self.channel_format(inputs) + outputs = pd.nn.functional.batch_norm( + inputs, self.moving_mean, self.moving_var, weight=self.gamma, bias=self.beta, training=self.is_train, + momentum=self.decay, epsilon=self.epsilon, 
data_format=data_format + ) + return outputs + + def channel_format(self, inputs): + """ return "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". """ + len_in_shape = len(inputs.shape) + if len_in_shape == 2: + return 'NC' + if self.data_format == 'channels_last': + if len_in_shape == 3: + return 'NLC' + if len_in_shape == 4: + return 'NHWC' + if len_in_shape == 5: + return 'NDHWC' + if self.data_format == 'channels_first': + if len_in_shape == 3: + return 'NCL' + if len_in_shape == 4: + return 'NCHW' + if len_in_shape == 5: + return 'NCDHW' class GroupConv2D(object): @@ -958,58 +1198,98 @@ class SeparableConv2D(object): class AdaptiveMeanPool1D(object): def __init__(self, output_size, data_format): - pass + self.data_format, _ = preprocess_1d_format(data_format, None) + self.output_size = output_size def __call__(self, input): - raise NotImplementedError + if self.data_format == 'NLC': + input = nhwc_to_nchw(input) + + output = F.adaptive_avg_pool1d(input, self.output_size) + + if self.data_format == 'NLC': + output = nchw_to_nhwc(output) + + return output class AdaptiveMeanPool2D(object): def __init__(self, output_size, data_format): - pass + self.data_format, _ = preprocess_2d_format(data_format, None) + self.output_size = output_size def __call__(self, inputs): - raise NotImplementedError + return F.adaptive_avg_pool2d(inputs, output_size=self.output_size, data_format=self.data_format) class AdaptiveMeanPool3D(object): def __init__(self, output_size, data_format): - pass + self.data_format, _ = preprocess_3d_format(data_format, None) + self.output_size = output_size def __call__(self, inputs): - raise NotImplementedError + + return F.adaptive_avg_pool3d(inputs, output_size=self.output_size, data_format=self.data_format) class AdaptiveMaxPool1D(object): def __init__(self, output_size, data_format): - pass + + self.data_format, _ = preprocess_1d_format(data_format, None) + self.output_size = output_size def __call__(self, input): - raise NotImplementedError 
+ if self.data_format == 'NLC': + input = nhwc_to_nchw(input) + + output = F.adaptive_max_pool1d(input, self.output_size) + + if self.data_format == 'NLC': + output = nchw_to_nhwc(output) + + return output class AdaptiveMaxPool2D(object): def __init__(self, output_size, data_format): - pass + self.data_format, _ = preprocess_2d_format(data_format, None) + self.output_size = output_size def __call__(self, inputs): - raise NotImplementedError + if self.data_format == 'NHWC': + inputs = nhwc_to_nchw(inputs) + + output = F.adaptive_max_pool2d(inputs, self.output_size) + + if self.data_format == 'NHWC': + output = nchw_to_nhwc(output) + + return output class AdaptiveMaxPool3D(object): def __init__(self, output_size, data_format): - pass + self.data_format, _ = preprocess_3d_format(data_format, None) + self.output_size = output_size def __call__(self, inputs): - raise NotImplementedError + if self.data_format == 'NDHWC': + inputs = nhwc_to_nchw(inputs) + + output = F.adaptive_max_pool3d(inputs, self.output_size) + + if self.data_format == 'NDHWC': + output = nchw_to_nhwc(output) + + return output class BinaryConv2D(object): diff --git a/tensorlayer/backend/ops/tensorflow_backend.py b/tensorlayer/backend/ops/tensorflow_backend.py index 9d9a00f..99a0b31 100644 --- a/tensorlayer/backend/ops/tensorflow_backend.py +++ b/tensorlayer/backend/ops/tensorflow_backend.py @@ -414,6 +414,10 @@ def convert_to_tensor(value, dtype=None): return tf.convert_to_tensor(value, dtype) +def convert_to_numpy(value): + return value.numpy() + + def sqrt(x): """ Computes square root of x element-wise. 
@@ -845,6 +849,12 @@ def split(value, num_or_size_splits, axis=0, num=None): return tf.split(value=value, num_or_size_splits=num_or_size_splits, axis=axis, num=num) +class Floor(object): + + def __call__(self, x): + return tf.floor(x) + + def floor(x): return tf.floor(x) @@ -917,7 +927,7 @@ class NCELoss(object): return outputs -class Not_equal(object): +class NotEqual(object): def __init__(self): pass @@ -926,7 +936,7 @@ class Not_equal(object): return tf.not_equal(x, y) -class Count_nonzero(object): +class CountNonzero(object): def __init__(self, keepdims=None, dtype=int64): self.keepdims = keepdims @@ -997,6 +1007,12 @@ class Sign(object): return tf.sign(x) +class Ceil(object): + + def __call__(self, x): + return tf.math.ceil(x) + + def ceil(x): return tf.math.ceil(x) diff --git a/tensorlayer/backend/ops/tensorflow_nn.py b/tensorlayer/backend/ops/tensorflow_nn.py index 5cefda3..d8b2d73 100644 --- a/tensorlayer/backend/ops/tensorflow_nn.py +++ b/tensorlayer/backend/ops/tensorflow_nn.py @@ -652,6 +652,20 @@ def moments(x, axes, shift=None, keepdims=False): return outputs +class MaxPool1d(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + outputs = tf.nn.max_pool( + input=inputs, ksize=self.ksize, strides=self.strides, padding=self.padding, data_format=self.data_format + ) + return outputs + + class MaxPool(object): def __init__(self, ksize, strides, padding, data_format=None): @@ -711,6 +725,25 @@ def max_pool(input, ksize, strides, padding, data_format=None): return outputs +class AvgPool1d(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + outputs = tf.nn.pool( + 
input=inputs, + window_shape=self.ksize, + pooling_type="AVG", + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + ) + return outputs + + class AvgPool(object): def __init__(self, ksize, strides, padding, data_format=None): @@ -762,6 +795,24 @@ def avg_pool(input, ksize, strides, padding): return outputs +class MaxPool3d(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + outputs = tf.nn.max_pool3d( + input=inputs, + ksize=self.ksize, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + ) + return outputs + + def max_pool3d(input, ksize, strides, padding, data_format=None): """ Performs the max pooling on the input. @@ -801,6 +852,24 @@ def max_pool3d(input, ksize, strides, padding, data_format=None): return outputs +class AvgPool3d(object): + + def __init__(self, ksize, strides, padding, data_format=None): + self.data_format, self.padding = preprocess_3d_format(data_format, padding) + self.ksize = ksize + self.strides = strides + + def __call__(self, inputs): + outputs = tf.nn.avg_pool3d( + input=inputs, + ksize=self.ksize, + strides=self.strides, + padding=self.padding, + data_format=self.data_format, + ) + return outputs + + def avg_pool3d(input, ksize, strides, padding, data_format=None): """ Performs the average pooling on the input. 
@@ -953,9 +1022,9 @@ def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilation class Conv1d_transpose(object): def __init__( - self, strides, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None, in_channels=None + self, stride, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None, in_channels=None ): - self.strides = strides + self.stride = stride self.dilations = dilations self.data_format, self.padding = preprocess_1d_format(data_format, padding) @@ -973,10 +1042,10 @@ class Conv1d_transpose(object): output_channels = filters_shape[1] dilations_w = 1 - if isinstance(self.strides, int): - strides_w = self.strides + if isinstance(self.stride, int): + strides_w = self.stride else: - strides_list = list(self.strides) + strides_list = list(self.stride) strides_w = strides_list[w_axis] if self.dilations is not None: @@ -1002,7 +1071,7 @@ class Conv1d_transpose(object): input=input, filters=filters, output_shape=output_shape, - strides=self.strides, + strides=self.stride, padding=self.padding, data_format=self.data_format, dilations=self.dilations, @@ -1089,10 +1158,10 @@ class Conv2d_transpose(object): strides_w = self.strides else: strides_list = list(self.strides) - if len(strides_list) != 4: + if len(strides_list) == 2: strides_h = strides_list[0] strides_w = strides_list[1] - else: + elif len(strides_list) == 4: strides_h = strides_list[h_axis] strides_w = strides_list[w_axis] @@ -1102,10 +1171,10 @@ class Conv2d_transpose(object): dilations_w = self.dilations else: dilations_list = list(self.dilations) - if len(dilations_list) != 4: + if len(dilations_list) == 2: dilations_h = dilations_list[0] dilations_w = dilations_list[1] - else: + elif len(dilations_list) == 4: dilations_h = dilations_list[h_axis] dilations_w = dilations_list[w_axis] @@ -1212,12 +1281,12 @@ class Conv3d_transpose(object): strides_d, strides_h, strides_w = self.strides else: strides_list = list(self.strides) - if 
len(strides_list) != 5: + if len(strides_list) == 3: strides_d, strides_h, strides_w = \ strides_list[0], \ strides_list[1], \ strides_list[2] - else: + elif len(strides_list) == 5: strides_d, strides_h, strides_w = \ strides_list[d_axis], \ strides_list[h_axis], \ @@ -1228,12 +1297,12 @@ class Conv3d_transpose(object): dilations_d, dilations_h, dilations_w = self.dilations else: dilations_list = list(self.dilations) - if len(dilations_list) != 5: + if len(dilations_list) == 3: dilations_d, dilations_h, dilations_w = \ dilations_list[0], \ dilations_list[1], \ dilations_list[2] - else: + elif len(dilations_list) == 5: dilations_d, dilations_h, dilations_w = \ dilations_list[d_axis],\ dilations_list[h_axis], \ diff --git a/tensorlayer/cost/__init__.py b/tensorlayer/cost/__init__.py index 3ca7c2c..9a7cbdd 100644 --- a/tensorlayer/cost/__init__.py +++ b/tensorlayer/cost/__init__.py @@ -7,8 +7,6 @@ if BACKEND == 'tensorflow': from .tensorflow_cost import * elif BACKEND == 'mindspore': from .mindspore_cost import * -elif BACKEND == 'dragon': - pass elif BACKEND == 'paddle': from .paddle_cost import * else: diff --git a/tensorlayer/cost/mindspore_cost.py b/tensorlayer/cost/mindspore_cost.py index 694c5fc..4ebb2cd 100644 --- a/tensorlayer/cost/mindspore_cost.py +++ b/tensorlayer/cost/mindspore_cost.py @@ -6,7 +6,7 @@ from mindspore.nn import Cell import mindspore.ops as P __all__ = [ - 'cross_entropy', + 'softmax_cross_entropy_with_logits', 'sigmoid_cross_entropy', 'binary_cross_entropy', 'mean_squared_error', @@ -25,24 +25,9 @@ __all__ = [ 'maxnorm_i_regularizer', ] -cross_entropy = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') +softmax_cross_entropy_with_logits = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') - -def sigmoid_cross_entropy(output, target, name=None): - """Sigmoid cross-entropy operation, see ``tf.ops.sigmoid_cross_entropy_with_logits``. 
- - Parameters - ---------- - output : Tensor - A batch of distribution with shape: [batch_size, num of classes]. - target : Tensor - A batch of index with shape: [batch_size, ]. - name : string - Name of this loss. - - """ - outputs = P.ReduceMean(cross_entropy(output, target)) - return outputs +sigmoid_cross_entropy = P.SigmoidCrossEntropyWithLogits() def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'): @@ -73,40 +58,7 @@ def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'): raise NotImplementedError("Not Implemented.") -def mean_squared_error(output, target, is_mean=False, axis=-1, name="mean_squared_error"): - """Return the TensorFlow expression of mean-square-error (L2) of two batch of data. - - Parameters - ---------- - output : Tensor - 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel]. - target : Tensor - The target distribution, format the same with `output`. - is_mean : boolean - Whether compute the mean or sum for each example. - - If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data. - - If False, use ``tf.reduce_sum`` (default). - axis : int or list of int - The dimensions to reduce. - name : str - An optional name to attach to this function. 
- - References - ------------ - - `Wiki Mean Squared Error `__ - - """ - # with tf.name_scope(name): - # if len(output.shape) == 2: # [batch_size, n_feature] - # axis = 1 - # elif len(output.shape) == 3: # [batch_size, w, h] - # axis = [1, 2] - # elif len(output.shape) == 4: # [batch_size, w, h, c] - # axis = [1, 2, 3] - # else: - # raise Exception("Unknow dimension") - - return nn.MSELoss()(output, target) +mean_squared_error = nn.MSELoss() def normalized_mean_square_error(output, target, axis=-1, name="normalized_mean_squared_error_loss"): diff --git a/tensorlayer/cost/paddle_cost.py b/tensorlayer/cost/paddle_cost.py index cd66fa7..09e0af3 100644 --- a/tensorlayer/cost/paddle_cost.py +++ b/tensorlayer/cost/paddle_cost.py @@ -5,7 +5,7 @@ import paddle.nn.functional as F import paddle as pd __all__ = [ - 'cross_entropy', + 'softmax_cross_entropy_with_logits', 'sigmoid_cross_entropy', 'binary_cross_entropy', 'mean_squared_error', @@ -24,7 +24,8 @@ __all__ = [ 'maxnorm_i_regularizer', ] -def cross_entropy(output, target): + +def softmax_cross_entropy_with_logits(output, target): """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, it implements softmax internally. See ``tf.ops.sparse_softmax_cross_entropy_with_logits``. 
@@ -40,7 +41,7 @@ def cross_entropy(output, target): Examples -------- >>> import tensorlayer as tl - >>> ce = tl.cost.cross_entropy(y_logits, y_target_logits) + >>> ce = tl.cost.softmax_cross_entropy_with_logits(y_logits, y_target_logits) References ----------- @@ -70,8 +71,8 @@ def sigmoid_cross_entropy(output, target): pass else: depth = output.shape[-1] - label = pd.fluid.layers.one_hot(target, depth=depth) - out = pd.fluid.layers.sigmoid_cross_entropy_with_logits(x=output, label=label) + target = pd.fluid.layers.one_hot(target, depth=depth) + out = pd.fluid.layers.sigmoid_cross_entropy_with_logits(x=output, label=target) out = pd.fluid.layers.reduce_mean(out) return out @@ -102,8 +103,8 @@ def binary_cross_entropy(output, target, epsilon=1e-8): depth = output.shape[-1] target = pd.fluid.layers.one_hot(target, depth=depth) out = pd.fluid.layers.reduce_sum( - -(target * pd.log(output + epsilon) + (1. - target) * pd.log(1. - output + epsilon)) - ) + -(target * pd.log(output + epsilon) + (1. - target) * pd.log(1. 
- output + epsilon)) + ) return out @@ -192,7 +193,6 @@ def absolute_difference_error(output, target, is_mean=False, axis=-1, name="abso """ - if is_mean: loss = pd.fluid.layers.reduce_mean(pd.fluid.layers.reduce_mean(pd.abs(output - target), axis)) else: @@ -600,4 +600,4 @@ def huber_loss( """ - raise NotImplementedError("Not Implemented.") \ No newline at end of file + raise NotImplementedError("Not Implemented.") diff --git a/tensorlayer/cost/tensorflow_cost.py b/tensorlayer/cost/tensorflow_cost.py index b07acad..1cab86b 100644 --- a/tensorlayer/cost/tensorflow_cost.py +++ b/tensorlayer/cost/tensorflow_cost.py @@ -10,7 +10,7 @@ from tensorflow.python.ops import array_ops, math_ops, nn_ops, standard_ops from tensorlayer import logging __all__ = [ - 'cross_entropy', + 'softmax_cross_entropy_with_logits', 'sigmoid_cross_entropy', 'binary_cross_entropy', 'mean_squared_error', @@ -30,7 +30,7 @@ __all__ = [ ] -def cross_entropy(output, target, name=None): +def softmax_cross_entropy_with_logits(output, target, name=None): """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, it implements softmax internally. See ``tf.ops.sparse_softmax_cross_entropy_with_logits``. 
@@ -46,7 +46,7 @@ def cross_entropy(output, target, name=None): Examples -------- >>> import tensorlayer as tl - >>> ce = tl.cost.cross_entropy(y_logits, y_target_logits, 'my_loss') + >>> ce = tl.cost.softmax_cross_entropy_with_logits(y_logits, y_target_logits, 'my_loss') References ----------- @@ -236,7 +236,7 @@ def dice_coe(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5): Examples --------- >>> import tensorlayer as tl - >>> outputs = tl.act.pixel_wise_softmax(outputs) + >>> outputs = tl.ops.softmax(outputs) >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_) References @@ -492,20 +492,21 @@ def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details= >>> vocab_size = 10000 >>> embedding_size = 256 >>> ni = tl.layers.Input([batch_size, None], dtype=tf.int64) - >>> net = tl.layers.Embedding( + >>> net_lits = [] + >>> net_list.append(tl.layers.Embedding( ... vocabulary_size = vocab_size, ... embedding_size = embedding_size, - ... name = 'seq_embedding')(ni) - >>> net = tl.layers.RNN( + ... name = 'seq_embedding')) + >>> net_list.append(tl.layers.RNN( ... cell =tf.keras.layers.LSTMCell(units=embedding_size, dropout=0.1), ... return_seq_2d = True, - ... name = 'dynamicrnn')(net) - >>> net = tl.layers.Dense(n_units=vocab_size, name="output")(net) - >>> model = tl.models.Model(inputs=ni, outputs=net) + ... 
name = 'dynamicrnn')) + >>> net_list.append(tl.layers.Dense(n_units=vocab_size, name="output")) + >>> model = tl.layers.SequentialLayer(net_list) >>> input_seqs = np.random.randint(0, 10, size=(batch_size, 10), dtype=np.int64) >>> target_seqs = np.random.randint(0, 10, size=(batch_size, 10), dtype=np.int64) >>> input_mask = np.random.randint(0, 2, size=(batch_size, 10), dtype=np.int64) - >>> outputs = model(input_seqs, is_train=True) + >>> outputs = model(input_seqs) >>> loss = tl.cost.cross_entropy_seq_with_mask(outputs, target_seqs, input_mask) """ diff --git a/tensorlayer/dataflow/__init__.py b/tensorlayer/dataflow/__init__.py index 912a238..3eb1282 100644 --- a/tensorlayer/dataflow/__init__.py +++ b/tensorlayer/dataflow/__init__.py @@ -3,7 +3,6 @@ from __future__ import absolute_import, division, print_function from tensorlayer.backend.ops.load_backend import BACKEND -from tensorlayer.dataflow import image if BACKEND == 'tensorflow': from .tensorflow_data import * diff --git a/tensorlayer/dataflow/dataflow_examples.py b/tensorlayer/dataflow/dataflow_examples.py deleted file mode 100644 index 2bee246..0000000 --- a/tensorlayer/dataflow/dataflow_examples.py +++ /dev/null @@ -1,56 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf-8 -*- - -import tensorlayer as tl -from tensorlayer.dataflow import Dataset -import numpy as np - -X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) - - -def generator_train(): - inputs = X_train - targets = y_train - if len(inputs) != len(targets): - raise AssertionError("The length of inputs and targets should be equal") - for _input, _target in zip(inputs, targets): - # yield _input.encode('utf-8'), _target.encode('utf-8') - yield (_input, np.array(_target)) - - -batch_size = 128 -shuffle_buffer_size = 128 -n_epoch = 10 - -import tensorflow as tf - - -def _map_fn_train(img, target): - # 1. Randomly crop a [height, width] section of the image. 
- img = tf.image.random_crop(img, [24, 24, 3]) - # 2. Randomly flip the image horizontally. - img = tf.image.random_flip_left_right(img) - # 3. Randomly change brightness. - img = tf.image.random_brightness(img, max_delta=63) - # 4. Randomly change contrast. - img = tf.image.random_contrast(img, lower=0.2, upper=1.8) - # 5. Subtract off the mean and divide by the variance of the pixels. - img = tf.image.per_image_standardization(img) - target = tf.reshape(target, ()) - return img, target - - -import multiprocessing -train_ds = Dataset.from_generator( - generator=generator_train, output_types=(tl.float32, tl.int32) -) # , output_shapes=((24, 24, 3), (1))) - -train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count()) - -train_ds = train_ds.repeat(n_epoch) -train_ds = train_ds.shuffle(shuffle_buffer_size) -train_ds = train_ds.prefetch(buffer_size=4096) -train_ds = train_ds.batch(batch_size) - -for X_batch, y_batch in train_ds: - print(X_batch.shape, y_batch.shape) diff --git a/tensorlayer/dataflow/image/mindspore_image.py b/tensorlayer/dataflow/image/mindspore_image.py deleted file mode 100644 index 9f10c7d..0000000 --- a/tensorlayer/dataflow/image/mindspore_image.py +++ /dev/null @@ -1,1539 +0,0 @@ -import numpy as np -from PIL import Image, ImageOps, ImageEnhance, __version__ -import random -import colorsys -import numbers -import math -import io -__all__ = [ - 'CentralCrop', - 'HsvToRgb', - 'AdjustBrightness', - 'AdjustContrast', - 'AdjustHue', - 'AdjustSaturation', - 'Crop', - 'FlipHorizontal', - 'FlipVertical', - 'GrayToRgb', - 'Standardization', - 'RgbToGray', - 'PadToBoundingbox', - 'Pad', - 'RandomBrightness', - 'RandomContrast', - 'RandomHue', - 'RandomSaturation', - 'RandomCrop', - 'Resize', - 'CropAndResize', - 'CropOrPad', - 'ResizeAndPad', - 'RgbToHsv', - 'Transpose', - 'RandomRotation', - 'RandomShift', - 'RandomShear', - 'RandomZoom', - 'Rescale', - 'RandomFlipVertical', - 'RandomFlipHorizontal', - 'HWC2CHW', - 'CHW2HWC', -] 
- -augment_error_message = 'img should be PIL image. Got {}.' - - -def ToTensor(image): - - image = np.asarray(image).astype(np.float32) - return image - - -def ToPIL(image): - """ - Convert the input image to PIL format. - - Args: - img: Image to be converted. - - Returns: - img (PIL image), Converted image. - """ - return Image.fromarray(np.array(image).astype(np.uint8)) - - -def Decode(image): - """ - Decode the input image to PIL image format in RGB mode. - - Args: - img: Image to be decoded. - - Returns: - img (PIL image), Decoded image in RGB mode. - """ - - try: - data = io.BytesIO(image) - img = Image.open(data) - return img.convert('RGB') - except IOError as e: - raise ValueError("{0}\nWARNING: Failed to decode given image.".format(e)) - except AttributeError as e: - raise ValueError("{0}\nWARNING: Failed to decode, Image might already be decoded.".format(e)) - - -def Crop(image, offset_height, offset_width, target_height, target_width, is_hwc=True): - ''' - - Parameters - ---------- - image: - A image or a batch of images - offset_height: - Vertical coordinate of the top-left corner of the result in the input. - offset_width: - Horizontal coordinate of the top-left corner of the result in the input. - target_height: - Height of the result. - target_width: - Width of the result. - is_hwc: - If is_hwc is True, the order of image channels is [B,H,W,C] or [H,W,C]. If is_hwc is False, the order of image channels is [B,C,H,W] or [C,H,W,] - Returns: - Output [batch, target_height, target_width, channels] or [target_height, target_width, channels] - ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - shape_size = len(image.shape) - - if not shape_size in (3, 4): - raise TypeError( - 'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \ - Got {}'.format(image.shape) - ) - if shape_size == 3: - if is_hwc: - height, width, channels = image.shape - else: - channels, height, width = image.shape - else: - if is_hwc: - batch, height, width, channels = image.shape - else: - batch, channels, height, width = image.shape - - if offset_width < 0: - raise ValueError('offset_width must be >0.') - if offset_height < 0: - raise ValueError('offset_height must be >0.') - if target_height < 0: - raise ValueError('target_height must be >0.') - if target_width < 0: - raise ValueError('target_width must be >0.') - if offset_width + target_width > width: - raise ValueError('offset_width + target_width must be <= image width.') - if offset_height + target_height > height: - raise ValueError('offset_height + target_height must be <= image height.') - - if shape_size == 3: - if is_hwc: - return ToTensor( - image[offset_height:offset_height + target_height, offset_width:offset_width + target_width, :] - ) - else: - return ToTensor( - image[:, offset_height:offset_height + target_height, offset_width:offset_width + target_width] - ) - else: - if is_hwc: - return ToTensor( - image[:, offset_height:offset_height + target_height, offset_width:offset_width + target_width, :] - ) - else: - return ToTensor( - image[:, :, offset_height:offset_height + target_height, offset_width:offset_width + target_width] - ) - - -def CentralCrop(image, central_fraction=None, size=None, is_hwc=True): - ''' - - Parameters - ---------- - image : - input Either a 3-D float Tensor of shape [height, width, depth] or a 4-D Tensor of shape [batch, height, width, depth], - central_fraction : - float (0, 1], fraction of size to crop - size: - size (Union[int, sequence]) – The output size of the cropped image. 
If size is an integer, a square crop of size (size, size) is returned. - If size is a sequence of length 2, it should be (height, width). - Returns : - 3-D float Tensor or 4-D float Tensor, as per the input. - ------- - If backend is tensorflow, central_fraction will be used preferentially. if size is used, the height-width ratio will be equivalent to original ratio.. - If backend is mindspore, size will be used preferentially. - ''' - if size is None and central_fraction is None: - raise ValueError('central_fraction and size can not be both None') - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - shape_size = len(image.shape) - if not shape_size in (3, 4): - raise TypeError( - 'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \ - Got {}'.format(image.shape) - ) - - if shape_size == 3: - if is_hwc: - height, width, channels = image.shape - else: - channels, height, width = image.shape - else: - if is_hwc: - batch, height, width, channels = image.shape - else: - batch, channels, height, width = image.shape - if size is None: - if central_fraction > 1 or central_fraction <= 0: - raise ValueError('central_fraction must be in (0,1].') - target_height = int(round(height * central_fraction)) - target_width = int(round(width * central_fraction)) - size = (target_height, target_width) - if isinstance(size, int): - size = (size, size) - crop_height, crop_width = size - crop_top = int(round((height - crop_height) / 2.)) - crop_left = int(round((width - crop_width) / 2.)) - - return Crop(image, crop_top, crop_left, crop_height, crop_width, is_hwc) - - -def hsv_to_rgb(np_hsv_img, is_hwc): - """ - Convert HSV img to RGB img. 
- - Args: - np_hsv_img (numpy.ndarray): NumPy HSV image array of shape (H, W, C) or (C, H, W) to be converted. - is_hwc (Bool): If True, the shape of np_hsv_img is (H, W, C), otherwise must be (C, H, W). - - Returns: - np_rgb_img (numpy.ndarray), NumPy HSV image with same shape of np_hsv_img. - """ - if is_hwc: - h, s, v = np_hsv_img[:, :, 0], np_hsv_img[:, :, 1], np_hsv_img[:, :, 2] - else: - h, s, v = np_hsv_img[0, :, :], np_hsv_img[1, :, :], np_hsv_img[2, :, :] - to_rgb = np.vectorize(colorsys.hsv_to_rgb) - r, g, b = to_rgb(h, s, v) - - if is_hwc: - axis = 2 - else: - axis = 0 - np_rgb_img = np.stack((r, g, b), axis=axis) - return np_rgb_img - - -def HsvToRgb(image, is_hwc=True): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - shape_size = len(image.shape) - - if not shape_size in (3, 4): - raise TypeError( - 'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \ - Got {}'.format(image.shape) - ) - if shape_size == 3: - batch_size = 0 - if is_hwc: - num_channels = image.shape[2] - else: - num_channels = image.shape[0] - else: - batch_size = image.shape[0] - if is_hwc: - num_channels = image.shape[3] - else: - num_channels = image.shape[1] - - if num_channels != 3: - raise TypeError('img should be 3 channels RGB img. 
Got {} channels'.format(num_channels)) - if batch_size == 0: - return hsv_to_rgb(image, is_hwc) - return ToTensor([hsv_to_rgb(img, is_hwc) for img in image]) - - -def AdjustBrightness(image, factor): - ''' - - Parameters - ---------- - image: - input NumPy image array or PIL image - factor: - factor should be in the range (-1,1) - Returns: - ------- - np darray image - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - if factor >= 1 or factor <= -1: - raise ValueError('factor must be in (-1,1).') - image = image + factor * 255 - image = np.clip(image, 0, 255) - - return ToTensor(image) - - -def AdjustContrast(image, factor): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - image = ImageEnhance.Contrast(image).enhance(factor) - - return ToTensor(image) - - -def AdjustHue(image, factor): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - image_hue_factor = factor - if not -1 <= image_hue_factor <= 1: - raise ValueError('image_hue_factor {} is not in [-1, 1].'.format(image_hue_factor)) - - mode = image.mode - if mode in {'L', '1', 'I', 'F'}: - return image - - hue, saturation, value = image.convert('HSV').split() - - np_hue = np.array(hue, dtype=np.uint8) - - with np.errstate(over='ignore'): - np_hue += np.uint8(image_hue_factor * 255) - hue = Image.fromarray(np_hue, 'L') - - image = Image.merge('HSV', 
(hue, saturation, value)).convert(mode) - - return ToTensor(image) - - -def AdjustSaturation(image, factor): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - enhancer = ImageEnhance.Color(image) - image = enhancer.enhance(factor) - - return ToTensor(image) - - -def FlipHorizontal(image): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - - image = np.fliplr(image) - - return image - - -def FlipVertical(image): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - image = np.flipud(image) - - return image - - -def GrayToRgb(image): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - shape = image.shape - output_image = np.zeros((shape[0], shape[1], 3), dtype=np.uint8) - if len(shape) == 3: - for i in range(3): - output_image[:, :, i] = image[:, :, 1] - elif len(shape) == 2: - for i in range(3): - output_image[:, :, i] = image - - return ToTensor(output_image) - - -def RgbToGray(image): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - ''' - 将彩色图像转换为灰度(模式“L”)时,库使用ITU-R 601-2 Luma转换: - L = R * 299/1000 + G * 587/1000 + B * 114/1000 - ''' - image = image.convert('L') - return ToTensor(image) - - -def PadToBoundingbox(image, offset_height, offset_width, target_height, target_width, padding_value=0, is_hwc=True): - ''' - - Parameters - ---------- - image: - A 3-D numpy ndarray or 4-D numpy ndarray image - offset_height: - Number of rows of zeros to add on top. - offset_width: - Number of columns of zeros to add on the left. - target_height: - Height of output image. - target_width - Width of output image. - Returns - A numpy ndarray image - ------- - ''' - - if offset_height < 0: - raise ValueError("offset_height must be >= 0") - if offset_width < 0: - raise ValueError("offset_width must be >= 0") - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - shape_size = len(image.shape) - if not shape_size in (3, 4): - raise TypeError( - 'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). 
\ - Got {}'.format(image.shape) - ) - if shape_size == 3: - if is_hwc: - height, width, channels = image.shape - else: - channels, height, width = image.shape - else: - if is_hwc: - batch, height, width, channels = image.shape - else: - batch, channels, height, width = image.shape - top = offset_height - bottom = target_height - height - top - left = offset_width - right = target_width - width - left - - if bottom < 0: - raise ValueError("target_height must be >= offset_height + height") - - if right < 0: - raise ValueError("target_width must be >= offset_width + width") - - if shape_size == 3: - if is_hwc: - return ToTensor( - np.pad( - image, ((top, bottom), (left, right), (0, 0)), mode='constant', - constant_values=(padding_value, padding_value) - ) - ) - else: - return ToTensor( - np.pad( - image, ((0, 0), (top, bottom), (left, right)), mode='constant', - constant_values=(padding_value, padding_value) - ) - ) - else: - if is_hwc: - return ToTensor( - np.pad( - image, ((0, 0), (top, bottom), (left, right), (0, 0)), mode='constant', - constant_values=(padding_value, padding_value) - ) - ) - else: - return ToTensor( - np.pad( - image, ((0, 0), (0, 0), (top, bottom), (left, right)), mode='constant', - constant_values=(padding_value, padding_value) - ) - ) - - -def Pad(image, padding, padding_value=0, mode='constant', is_hwc=True): - ''' - - Parameters - ---------- - image: - A 3-D or 4-D Tensor. - padding: - An integer or a list/tuple. If a single number is provided, pad all borders with this value. - If a tuple or list of 2 values is provided, pad the left and top with the first value and the right and bottom with the second value. - If 4 values are provided as a list or tuple, pad the left, top, right and bottom respectively. - padding_value: - In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor. - mode: - One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive) - Returns: - A padded Tensor. Has the same type as tensor. 
- ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - shape_size = image.shape - if len(shape_size) == 3: - batch_size = 0 - elif len(shape_size) == 4: - batch_size = shape_size[0] - else: - raise TypeError( - 'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \ - Got {}'.format(image.shape) - ) - if mode not in ('constant', 'edge', 'reflect', 'symmetric'): - raise TypeError('mode should be one of (constant,edge,reflect,symmetric).') - - if isinstance(padding, int): - padding = ((padding, padding), (padding, padding)) - elif isinstance(padding, list) or isinstance(padding, tuple): - if len(padding) == 2: - padding = ((padding[0], padding[0]), (padding[1], padding[1])) - elif len(padding) == 4: - padding = ((padding[0], padding[1]), (padding[2], padding[3])) - else: - raise ValueError('The length of padding should be 2 or 4, but got {}.'.format(len(padding))) - else: - raise TypeError('Padding should be an integer or a list/tuple, but got {}.'.format(type(padding))) - - if batch_size == 0: - if is_hwc: - padding = (padding[0], padding[1], (0, 0)) - else: - padding = ( - (0, 0), - padding[0], - padding[1], - ) - else: - if is_hwc: - padding = ((0, 0), padding[0], padding[1], (0, 0)) - else: - padding = ((0, 0), (0, 0), padding[0], padding[1]) - if mode == 'constant': - return ToTensor(np.pad(image, padding, mode=mode, constant_values=(padding_value, padding_value))) - else: - return ToTensor(np.pad(image, padding, mode=mode)) - - -def Standardization(image, mean=None, std=None, channel_mode=False, is_hwc=True): - ''' - - Parameters - ---------- - image: - An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. 
- mean: - List or tuple of mean values for each channel, with respect to channel order. - std: - List or tuple of standard deviations for each channel. - channel_mode: - Decide to implement standardization on whole image or each channel of image. - Returns: - A Tensor with the same shape and dtype as image. - ------- - ''' - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - num_shape = image.shape - if is_hwc: - height, width, channels = 0, 1, 2 - else: - channels, height, width = 0, 1, 2 - if mean is not None and std is not None: - if len(mean) != len(std): - raise ValueError("Length of mean and std must be equal") - if len(mean) == 1: - mean = [mean[0]] * num_shape[channels] - std = [std[0]] * num_shape[channels] - mean = np.array(mean, dtype=image.dtype) - std = np.array(std, dtype=image.dtype) - return ToTensor((image - mean[:, None, None]) / std[:, None, None]) - elif mean is None and std is None: - if channel_mode: - num_pixels = num_shape[height] * num_shape[width] - image_mean = np.mean(image, axis=(height, width)) - stddev = np.std(image, axis=(height, width)) - min_sttdev = 1 / np.sqrt(num_pixels) - min_sttdev = [min_sttdev] * num_shape[channels] - adjusted_sttdev = np.maximum(stddev, min_sttdev) - image -= image_mean - image = np.divide(image, adjusted_sttdev) - return ToTensor(image) - else: - num_pixels = num_shape[height] * num_shape[width] * num_shape[channels] - image_mean = np.mean(image, axis=(0, 1, 2)) - image_mean = [image_mean] * 3 - stddev = np.std(image, axis=(0, 1, 2)) - min_sttdev = 1 / np.sqrt(num_pixels) - adjusted_sttdev = np.maximum(stddev, min_sttdev) - adjusted_sttdev = [adjusted_sttdev] * 3 - image -= image_mean - image = np.divide(image, adjusted_sttdev) - return 
ToTensor(image) - else: - raise ValueError('std and mean must both be None or not None') - - -def RandomBrightness(image, factor): - ''' - - Parameters - ---------- - image: - An image or images to adjust - factor: - Float, must be non-negative. Factor must be (0,1). Random range will be [-factor, factor). - Returns: - The brightness-adjusted image(s). - ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - if factor < 0 or factor > 1: - raise ValueError('factor should be in [0,1].') - delta = random.uniform(-factor, factor) - image = image + delta * 255 - image = np.clip(image, 0, 255) - - return image - - -def RandomContrast(image, lower, upper, seed=None): - ''' - - Parameters - ---------- - image: - An image tensor with 3 or more dimensions. - lower: - float. Lower bound for the random contrast factor. - upper: - float. Upper bound for the random contrast factor. - seed: - A Python integer. Used to create a random seed. - - Returns: - The contrast-adjusted image(s). - ------- - ''' - if upper <= lower: - raise ValueError('upper must be > lower') - if lower < 0: - raise ValueError('lower must be non-negative') - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - factor = random.uniform(lower, upper) - image = ImageEnhance.Contrast(image).enhance(factor) - - return ToTensor(image) - - -def RandomHue(image, factor, seed=None): - ''' - - Parameters - ---------- - image: - RGB image or images. The size of the last dimension must be 3. - factor: - float. 
The maximum value for the random factor. - seed: - An operation-specific seed. I - - Returns: - Adjusted numpy ndarrry image(s). - ------- - - ''' - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - if factor > 0.5 or factor < 0: - raise ValueError('factor should be in [0,0.5].') - - image_hue_factor = random.uniform(-factor, factor) - mode = image.mode - if mode in {'L', '1', 'I', 'F'}: - return image - - hue, saturation, value = image.convert('HSV').split() - - np_hue = np.array(hue, dtype=np.uint8) - - with np.errstate(over='ignore'): - np_hue += np.uint8(image_hue_factor * 255) - hue = Image.fromarray(np_hue, 'L') - - image = Image.merge('HSV', (hue, saturation, value)).convert(mode) - - return ToTensor(image) - - -def RandomSaturation(image, lower, upper, seed=None): - ''' - Parameters - ---------- - image: - RGB image or images. The size of the last dimension must be 3. - lower: - float. Lower bound for the random saturation factor. - upper: - float. Upper bound for the random saturation factor. - seed: - An operation-specific seed. - - Returns; - Adjusted numpy ndarray image(s). - ------- - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - if upper <= lower: - raise ValueError('upper must be > lower.') - - if lower < 0: - raise ValueError('lower must be non-negative.') - factor = random.uniform(lower, upper) - enhancer = ImageEnhance.Color(image) - image = enhancer.enhance(factor) - - return ToTensor(image) - - -def RandomCrop(image, size, is_hwc=True): - ''' - - Parameters - ---------- - image: - Input an image to crop. 
- size: - if size is an integer, shape of cropped image will be [size, size, 3]. if length of size is 2. - shape of cropped image will be [height, width, 3]. - Returns: - A cropped image of the same rank as image and shape size. - ------- - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - if isinstance(size, int): - size = (size, size) - elif isinstance(size, (tuple, list)) and len(size) == 2: - size = size - else: - raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.") - - def _input_to_factor_(image, size, is_hwc): - if len(image.shape) == 3: - if is_hwc: - height, width, channels = image.shape - else: - channels, height, width = image.shape - else: - if is_hwc: - batch, height, width, channels = image.shape - else: - batch, channels, height, width = image.shape - - target_height, target_width = size - if target_height > height or target_width > width: - raise ValueError("Crop size {} is larger than input image size {}".format(size, (height, width))) - if target_height == height and target_width == width: - return 0, 0, height, width - - top = random.randint(0, height - target_height) - left = random.randint(0, width - target_width) - return top, left, target_height, target_width - - top, left, height, width = _input_to_factor_(image, size, is_hwc) - - return Crop(image, top, left, height, width, is_hwc) - - -def Resize(image, size, method='bilinear', preserve_aspect_ratio=False, antialias=False): - ''' - - Parameters - ---------- - images: - Input an image to resize - size: - if size is an integer, shape of resized image will be [size, size, 3]. if length of size is 2. - shape of resized image will be [height, width, 3]. 
- method: - An image.ResizeMethod, or string equivalent. Defaults to bilinear. - preserve_aspect_ratio: - Whether to preserve the aspect ratio. - antialias: - Whether to use an anti-aliasing filter when downsampling an image. - Returns: - an resized image - ------- - ''' - DE_PY_INTER_MODE = { - 'nearest': Image.NEAREST, - 'bilinear': Image.BILINEAR, - 'cubic': Image.CUBIC, - 'lanczos': Image.LANCZOS, - 'bicubic': Image.BICUBIC - } - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - if isinstance(size, int): - size = (size, size) - elif isinstance(size, (tuple, list)) or len(size) == 2: - target_height, target_width = size - size = (target_width, target_height) - else: - raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.") - if method not in ('nearest', 'bilinear', 'cubic', 'lanczos', 'bicubic'): - raise TypeError('Unknown resize method! resize method must be in (nearest bilinear cubic lanczos bicubic)') - - if preserve_aspect_ratio: - width, height = image.size - target_width, target_height = size - scale_factor_height = float(target_height / height) - scale_factor_width = float(target_width / width) - scale_factor = np.minimum(scale_factor_height, scale_factor_width) - new_target_height = int(scale_factor * height) - new_target_width = int(scale_factor * width) - size = (new_target_width, new_target_height) - interpolation = DE_PY_INTER_MODE[method] - image = image.resize(size, interpolation) - if antialias: - image = image.resize(size, Image.ANTIALIAS) - - return ToTensor(image) - - -def CropAndResize(image, boxes, box_indices, crop_size, method='bilinear', extrapolation_value=0, is_hwc=True): - ''' - - Parameters - ---------- - image: - A 4-D tensor of shape [batch, image_height, image_width, depth]. 
Both image_height and image_width need to be positive. - boxes: - A 2-D tensor of shape [num_boxes, 4]. - box_indices: - A 1-D tensor of shape [num_boxes] with int32 values in [0,batch). - The value of box_ind[i] specifies the image that the i-th box refers to. - crop_size: - A 1-D tensor of 2 elements, size = [crop_height, crop_width]. All cropped image patches are resized to this size. - The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive. - method: - An optional string specifying the sampling method for resizing. - It can be either "bilinear" or "nearest" and default to "bilinear". - extrapolation_value: - An optional float. Defaults to 0. Value used for extrapolation, when applicable. - Returns: - A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth]. - ------- - - ''' - if method not in ["bilinear", "nearest"]: - raise ValueError('method must be bilinear or nearest.') - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - boxes = np.asarray(boxes) - box_indices = np.asarray(box_indices) - image_shape = image.shape - if len(image_shape) == 4: - batch_size = image_shape[0] - elif len(image_shape) == 3: - image = np.expand_dims(image, axis=0) - else: - raise ValueError('Input must be a 3-D or 4-D image Tensor.') - - box_num = boxes.shape[0] # boxes.shape is [n,4]. n is the number of boxes. 
- if not is_hwc: # 判断通道顺序,为了便于后续计算,将通道顺序调整为HWC or BHWC - image = np.transpose(image, (0, 2, 3, 1)) - batch_size, height, width, channels = image.shape - return_image = np.zeros((box_num, crop_size[0], crop_size[1], 3)) - for i in range(box_num): - y1, x1, y2, x2 = boxes[i] # 首先判断图像是否需要翻转 , 若y1>y2 需要垂直翻转, 若x1>x2 需要水平翻转 - cur_image = image[box_indices[i]] - if y1 > y2: - cur_image = FlipVertical(cur_image) - y1, y2 = y2, y1 - if x1 > x2: - cur_image = FlipHorizontal(cur_image) - x1, x2 = x2, x1 - top_padding = 0 if y1 > 0 else int(round(height * (-y1))) - left_padding = 0 if x1 > 0 else int(round(width * (-x1))) - bottom_padding = 0 if y2 < 1 else int(round(height * (y2 - 1))) - right_padding = 0 if x2 < 1 else int(round(width * (x2 - 1))) - # 判断是否需要padding - target_height = top_padding + height + bottom_padding - target_width = left_padding + width + right_padding - if target_height != height or target_width != width: - cur_image = PadToBoundingbox( - cur_image, offset_height=top_padding, offset_width=left_padding, target_height=target_height, - target_width=target_width, padding_value=extrapolation_value, is_hwc=is_hwc - ) - offset_height = 0 if y1 < 0 else int(round(height * y1)) - offset_width = 0 if x1 < 0 else int(round(width * x1)) - target_height = int(round(height * (y2 - y1))) - target_width = int(round(width * (x2 - x1))) - crop_image = Crop(cur_image, offset_height, offset_width, target_height, target_width, is_hwc) - resized_image = Resize(crop_image, crop_size, method=method) - return_image[i] = resized_image - if not is_hwc: - return_image = np.transpose(return_image, (0, 3, 1, 2)) - return ToTensor(return_image) - - -def CropOrPad(image, target_height, target_width, is_hwc=True): - ''' - Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros. - Parameters - ---------- - image: - 3-D Tensor of shape [height, width, channels]. - target_height: - Target height. - target_width: - Target width. 
- Returns: - Cropped and/or padded image. - ------- - ''' - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - shape_size = len(image.shape) - if not shape_size in (3, 4): - raise TypeError( - 'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \ - Got {}'.format(image.shape) - ) - if target_height < 0: - raise ValueError('target_height must be >0.') - if target_width < 0: - raise ValueError('target_width must be >0.') - if shape_size == 3: - if is_hwc: - height, width, channels = image.shape - else: - channels, height, width = image.shape - else: - if is_hwc: - batch, height, width, channels = image.shape - else: - batch, channels, height, width = image.shape - offset_height = height - target_height - offset_width = width - target_width - offset_crop_height = max(offset_height // 2, 0) - offset_crop_width = max(offset_width // 2, 0) - offset_pad_height = max(-offset_height // 2, 0) - offset_pad_width = max(-offset_width // 2, 0) - cropped = Crop( - image, offset_crop_height, offset_crop_width, min(height, target_height), min(width, target_width), is_hwc - ) - - padded = PadToBoundingbox(cropped, offset_pad_height, offset_pad_width, target_height, target_width, is_hwc=is_hwc) - - return ToTensor(padded) - - -def ResizeAndPad(image, target_height, target_width, method='bilinear', antialias=False, is_hwc=True): - ''' - - Parameters - ---------- - image: - 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. - target_height: - Target height. - target_width: - Target height. - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - Returns: - Resized and padded image. 
If images was 4-D, a 4-D float Tensor of shape [batch, new_height, new_width, channels]. - If images was 3-D, a 3-D float Tensor of shape [new_height, new_width, channels]. - ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - shape_size = len(image.shape) - if not shape_size in (3, 4): - raise TypeError( - 'img shape should be (H, W, C)/(N, H, W, C)/(C, H, W)/(N, C, H, W). \ - Got {}'.format(image.shape) - ) - if target_height < 0: - raise ValueError('target_height must be >0.') - if target_width < 0: - raise ValueError('target_width must be >0.') - if shape_size == 3: - if is_hwc: - height, width, channels = image.shape - else: - channels, height, width = image.shape - else: - if is_hwc: - batch, height, width, channels = image.shape - else: - batch, channels, height, width = image.shape - height = float(height) - width = float(width) - ratio = max(height / target_height, width / target_width) - resized_height = int(round(height / ratio)) - resized_width = int(round(width / ratio)) - padding_height = max(0, int(round((target_height - resized_height) / 2))) - padding_width = max(0, int(round((target_width - resized_width) / 2))) - resized = Resize( - image, size=(resized_height, resized_width), method=method, antialias=antialias - ) #需要解决 batch images的resize - padded = PadToBoundingbox(resized, padding_height, padding_width, target_height, target_width, is_hwc=is_hwc) - return ToTensor(padded) - - -def rgb_to_hsv(np_rgb_img, is_hwc): - """ - Convert RGB img to HSV img. - - Args: - np_rgb_img (numpy.ndarray): NumPy RGB image array of shape (H, W, C) or (C, H, W) to be converted. - is_hwc (Bool): If True, the shape of np_hsv_img is (H, W, C), otherwise must be (C, H, W). 
- - Returns: - np_hsv_img (numpy.ndarray), NumPy HSV image with same type of np_rgb_img. - """ - if is_hwc: - r, g, b = np_rgb_img[:, :, 0], np_rgb_img[:, :, 1], np_rgb_img[:, :, 2] - else: - r, g, b = np_rgb_img[0, :, :], np_rgb_img[1, :, :], np_rgb_img[2, :, :] - to_hsv = np.vectorize(colorsys.rgb_to_hsv) - h, s, v = to_hsv(r, g, b) - if is_hwc: - axis = 2 - else: - axis = 0 - np_hsv_img = np.stack((h, s, v), axis=axis) - return np_hsv_img - - -def RgbToHsv(image, is_hwc=True): - - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - - shape_size = len(image.shape) - - if not shape_size in (3, 4): - raise TypeError( - 'img shape should be (H, W, C)/(N, H, W, C)/(C ,H, W)/(N, C, H, W). \ - Got {}'.format(image.shape) - ) - - if shape_size == 3: - batch_size = 0 - if is_hwc: - num_channels = image.shape[2] - else: - num_channels = image.shape[0] - else: - batch_size = image.shape[0] - if is_hwc: - num_channels = image.shape[3] - else: - num_channels = image.shape[1] - - if num_channels != 3: - raise TypeError('img should be 3 channels RGB img. Got {} channels'.format(num_channels)) - if batch_size == 0: - return ToTensor(rgb_to_hsv(image, is_hwc)) - return ToTensor([rgb_to_hsv(img, is_hwc) for img in image]) - - -def Transpose(image, order): - """ - Transpose the input image with order - """ - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - if len(image.shape) == 3: - if len(order) != 3: - raise ValueError('if image is 3-D tensor, order should be a list/tuple with length of 3') - return ToTensor(np.transpose(image, order)) - elif len(image.shape) == 4: - if len(order) != 3: - raise ValueError('if image is 3-D tensor, order should be a list/tuple with length of 3') - return ToTensor(np.transpose(image, order)) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') - - -def RandomRotation( - image, degrees, fill_mode='nearest', fill_value=0, center=None, expand=False, is_hwc=True, interpolation_order=1 -): - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - if isinstance(degrees, numbers.Number): - if degrees < 0: - raise ValueError("If degrees is a single number, it cannot be negative.") - degrees = (-degrees, degrees) - elif isinstance(degrees, (list, tuple)): - if len(degrees) != 2: - raise ValueError("If degrees is a sequence, the length must be 2.") - else: - raise TypeError("Degrees must be a single non-negative number or a sequence") - - DE_PY_INTER_MODE = { - 'nearest': Image.NEAREST, - 'bilinear': Image.BILINEAR, - 'antialias': Image.ANTIALIAS, - 'bicubic': Image.BICUBIC - } - if fill_mode not in ('nearest', 'bilinear', 'antialias', 'bicubic'): - raise TypeError('Fill_mode must be in (nearest,bilinear, antialias,bicubic)') - - if isinstance(fill_value, int): - fill_value = tuple([fill_value] * 3) - - angle = random.uniform(degrees[0], degrees[1]) - fill_mode = DE_PY_INTER_MODE[fill_mode] - return ToTensor(image.rotate(angle, fill_mode, expand, center, fillcolor=fill_value)) - - -def RandomShift(image, shift, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image - Input 
tensor. Must be 3D. - shift: - int or list/tuple, if shift is int, Width shift range will equal to height shift range. - if shift is list/tuple, shift range will be [width fraction, height fraction] - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order - int, order of spline interpolation. see ndimage.interpolation.affine_transform - Returns - Shifted Numpy image tensor. - ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - - if isinstance(shift, numbers.Number): - width_fraction = shift - height_fraction = shift - elif isinstance(shift, list) or isinstance(shift, tuple): - if len(shift) == 2: - width_fraction = shift[0] - height_fraction = shift[1] - else: - raise ValueError('shift must be int or list/tuple of length 2') - - DE_PY_INTER_MODE = {'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC} - if fill_mode not in ('nearest', 'bilinear', 'bicubic'): - raise TypeError('Fill_mode must be in (nearest,bilinear,bicubic)') - fill_mode = DE_PY_INTER_MODE[fill_mode] - width, height = image.size - max_dx = width_fraction * width - max_dy = height_fraction * height - translations = (np.round(random.uniform(-max_dx, max_dx)), np.round(random.uniform(-max_dy, max_dy))) - - scale = 1.0 - shear = 0.0 - output_size = image.size - center = (width * 0.5 + 0.5, height * 0.5 + 0.5) - - angle = math.radians(0) - shear = math.radians(shear) - shear = [shear, 0] - scale = 1.0 / scale - d = 
math.cos(angle + shear[0]) * math.cos(angle + shear[1]) + \ - math.sin(angle + shear[0]) * math.sin(angle + shear[1]) - matrix = [ - math.cos(angle + shear[0]), - math.sin(angle + shear[0]), 0, -math.sin(angle + shear[1]), - math.cos(angle + shear[1]), 0 - ] - matrix = [scale / d * m for m in matrix] - matrix[2] += matrix[0] * (-center[0] - translations[0]) + matrix[1] * (-center[1] - translations[1]) - matrix[5] += matrix[3] * (-center[0] - translations[0]) + matrix[4] * (-center[1] - translations[1]) - - # Apply center translation: C * RSS^-1 * C^-1 * T^-1 - matrix[2] += center[0] - matrix[5] += center[1] - - if __version__ >= '5': - kwargs = {"fillcolor": fill_value} - else: - kwargs = {} - return ToTensor(image.transform(output_size, Image.AFFINE, matrix, fill_mode, **kwargs)) - - -def RandomShear(image, degree, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image - Input tensor. Must be 3D. - shift: - int or list/tuple, if shift is int, Width shift range will equal to height shift range. - if shift is list/tuple, shift range will be [width fraction, height fraction] - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order - int, order of spline interpolation. see ndimage.interpolation.affine_transform - Returns - Shifted Numpy image tensor. 
- ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - DE_PY_INTER_MODE = {'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC} - if fill_mode not in ('nearest', 'bilinear', 'bicubic'): - raise TypeError('Fill_mode must be in (nearest,bilinear,bicubic)') - fill_mode = DE_PY_INTER_MODE[fill_mode] - width, height = image.size - translations = (0, 0) - scale = 1.0 - shear = degree - output_size = image.size - center = (width * 0.5 + 0.5, height * 0.5 + 0.5) - angle = math.radians(0) - - if shear is not None: - if isinstance(shear, numbers.Number): - shear = (-1 * shear, shear) - shear = [random.uniform(shear[0], shear[1]), random.uniform(shear[0], shear[1])] - elif len(shear) == 2 or len(shear) == 4: - if len(shear) == 2: - shear = [shear[0], shear[1], shear[0], shear[1]] - elif len(shear) == 4: - shear = [s for s in shear] - shear = [random.uniform(shear[0], shear[1]), random.uniform(shear[2], shear[3])] - else: - raise ValueError( - "Shear should be a single value or a tuple/list containing " + "two values. 
Got {}".format(shear) - ) - shear = [math.radians(s) for s in shear] - else: - shear = [0, 0] - - - d = math.cos(angle + shear[0]) * math.cos(angle + shear[1]) + \ - math.sin(angle + shear[0]) * math.sin(angle + shear[1]) - matrix = [ - math.cos(angle + shear[0]), - math.sin(angle + shear[0]), 0, -math.sin(angle + shear[1]), - math.cos(angle + shear[1]), 0 - ] - matrix = [scale / d * m for m in matrix] - matrix[2] += matrix[0] * (-center[0] - translations[0]) + matrix[1] * (-center[1] - translations[1]) - matrix[5] += matrix[3] * (-center[0] - translations[0]) + matrix[4] * (-center[1] - translations[1]) - - # Apply center translation: C * RSS^-1 * C^-1 * T^-1 - matrix[2] += center[0] - matrix[5] += center[1] - - if __version__ >= '5': - kwargs = {"fillcolor": fill_value} - else: - kwargs = {} - return ToTensor(image.transform(output_size, Image.AFFINE, matrix, fill_mode, **kwargs)) - - -def RandomZoom(image, zoom_range, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image: - Input tensor. Must be 3D. - zoom_range: - Tuple of floats; zoom range for width and height. - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order: - int, order of spline interpolation. see ndimage.interpolation.affine_transform - - Returns - Zoomed Numpy image tensor. 
- ------- - - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, np.ndarray): - image = ToPIL(image) - if not isinstance(image, Image.Image): - raise TypeError(augment_error_message.format(type(image))) - DE_PY_INTER_MODE = {'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC} - if isinstance(zoom_range, list) or isinstance(zoom_range, tuple): - if len(zoom_range) == 2: - scale = random.uniform(zoom_range[0], zoom_range[1]) - else: - raise ValueError('The length of zoom_range must be 2') - else: - raise ValueError( - "Zoom_range should be a single value or a tuple/list containing " + "two values. Got {}".format(zoom_range) - ) - if fill_mode not in ('nearest', 'bilinear', 'bicubic'): - raise TypeError('Fill_mode must be in (nearest,bilinear,bicubic)') - fill_mode = DE_PY_INTER_MODE[fill_mode] - width, height = image.size - translations = (0, 0) - shear = (0, 0) - output_size = image.size - center = (width * 0.5 + 0.5, height * 0.5 + 0.5) - angle = math.radians(0) - - d = math.cos(angle + shear[0]) * math.cos(angle + shear[1]) + \ - math.sin(angle + shear[0]) * math.sin(angle + shear[1]) - matrix = [ - math.cos(angle + shear[0]), - math.sin(angle + shear[0]), 0, -math.sin(angle + shear[1]), - math.cos(angle + shear[1]), 0 - ] - matrix = [scale / d * m for m in matrix] - matrix[2] += matrix[0] * (-center[0] - translations[0]) + matrix[1] * (-center[1] - translations[1]) - matrix[5] += matrix[3] * (-center[0] - translations[0]) + matrix[4] * (-center[1] - translations[1]) - - # Apply center translation: C * RSS^-1 * C^-1 * T^-1 - matrix[2] += center[0] - matrix[5] += center[1] - - if __version__ >= '5': - kwargs = {"fillcolor": fill_value} - else: - kwargs = {} - return ToTensor(image.transform(output_size, Image.AFFINE, matrix, fill_mode, **kwargs)) - - -def Rescale(image, scale, offset=0): - ''' - - Parameters - ---------- - image: - 3-D image or 4-D images - 
scale: - Float, the scale to apply to the inputs. - offset: - Float, the offset to apply to the inputs. - Returns: - rescaled images - ------- - ''' - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - - return ToTensor(image * scale + offset) - - -def RandomFlipVertical(image, prob=0.5): - - if prob > random.random(): - image = FlipVertical(image) - return image - - -def RandomFlipHorizontal(image, prob=0.5): - - if prob > random.random(): - image = FlipHorizontal(image) - return image - - -def HWC2CHW(image): - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. Got {}'.format(type(image))) - - image_shape = image.shape - if (len(image_shape) == 3): - return Transpose(image, (2, 0, 1)) - elif (len(image_shape) == 4): - return Transpose(image, (0, 3, 1, 2)) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') - - -def CHW2HWC(image): - if not isinstance(image, np.ndarray) and not isinstance(image, Image.Image): - image = Decode(image) - if isinstance(image, Image.Image) or isinstance(image, np.ndarray): - image = ToTensor(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - - image_shape = image.shape - if (len(image_shape) == 3): - return Transpose(image, (1, 2, 0)) - elif (len(image_shape) == 4): - return Transpose(image, (0, 2, 3, 1)) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') diff --git a/tensorlayer/dataflow/image/paddle_image.py b/tensorlayer/dataflow/image/paddle_image.py deleted file mode 100644 index b33ef15..0000000 --- a/tensorlayer/dataflow/image/paddle_image.py +++ /dev/null @@ -1,19 +0,0 @@ -import paddle -import numpy as np -from PIL import Image -from paddle.vision.transforms import functional as F - -__all_ = [ - 'Standardization', -] - - -def Standardization(img, mean, std, data_format='HWC'): - - if data_format == 'CHW': - mean = paddle.to_tensor(mean).reshape([-1, 1, 1]) - std = paddle.to_tensor(std).reshape([-1, 1, 1]) - else: - mean = paddle.to_tensor(mean) - std = paddle.to_tensor(std) - return (img - mean) / std diff --git a/tensorlayer/dataflow/image/tensorflow_image.py b/tensorlayer/dataflow/image/tensorflow_image.py deleted file mode 100644 index ca0ce41..0000000 --- a/tensorlayer/dataflow/image/tensorflow_image.py +++ /dev/null @@ -1,760 +0,0 @@ -import tensorflow as tf -import numpy as np -from tensorflow.python.ops import math_ops -from tensorflow.python.ops import array_ops -from tensorflow.python.framework import ops -from tensorflow.python.ops.image_ops_impl import _AssertAtLeast3DImage -from tensorflow.python.framework import dtypes -from tensorflow.python.ops.image_ops_impl import convert_image_dtype -import numbers - -__all__ = [ - 'CentralCrop', - 'HsvToRgb', - 'AdjustBrightness', - 'AdjustContrast', - 'AdjustHue', - 'AdjustSaturation', - 'Crop', - 'FlipHorizontal', - 'FlipVertical', - 'GrayToRgb', - 'Standardization', - 'RgbToGray', - 'PadToBoundingbox', - 'Pad', - 'RandomBrightness', - 'RandomContrast', - 'RandomHue', - 'RandomSaturation', - 'RandomCrop', - 'Resize', - 'CropAndResize', - 'CropOrPad', - 'ResizeAndPad', - 'RgbToHsv', - 
'Transpose', - 'RandomRotation', - 'RandomShift', - 'RandomShear', - 'RandomZoom', - 'Rescale', - 'RandomFlipVertical', - 'RandomFlipHorizontal', - 'HWC2CHW', - 'CHW2HWC', -] - - -def CentralCrop(image, central_fraction=None, size=None): - ''' - - Parameters - ---------- - image : - input Either a 3-D float Tensor of shape [height, width, depth], - or a 4-D Tensor of shape [batch_size, height, width, depth]. - central_fraction : - float (0, 1], fraction of size to crop - size: - size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned. - If size is a sequence of length 2, it should be (height, width). - Returns : - 3-D / 4-D float Tensor, as per the input. - ------- - If backend is tensorflow, central_fraction will be used preferentially. if size is used,the height-width ratio will be equivalent to original ratio.. - If backend is mindspore, size will be used preferentially. - ''' - if size is None and central_fraction is None: - raise ValueError('central_fraction and size can not be both None') - - if central_fraction is None: - outshape = np.shape(image) - if len(outshape) == 3: - h_axis = 0 - w_axis = 1 - elif len(outshape) == 4: - h_axis = 1 - w_axis = 2 - - if isinstance(size, numbers.Number): - target_height = size - target_width = size - elif isinstance(size, tuple) or isinstance(size, list): - if len(size) == 2: - target_height = size[0] - target_width = size[1] - else: - raise ValueError('The length of size must be 2') - else: - raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.") - if target_height > outshape[h_axis] or target_width > outshape[w_axis]: - raise ValueError("Centralcrop image size must < original image size.") - central_fraction = max(target_height / outshape[h_axis], target_width / outshape[w_axis]) - else: - if central_fraction > 1 or central_fraction <= 0: - raise ValueError('central_fraction must be in (0,1].') - - return 
tf.image.central_crop(image, central_fraction) - - -def HsvToRgb(image): - - return tf.image.hsv_to_rgb(image) - - -def AdjustBrightness(image, factor): - - return tf.image.adjust_brightness(image, delta=factor) - - -def AdjustContrast(image, factor): - - return tf.image.adjust_contrast(image, contrast_factor=factor) - - -def AdjustHue(image, factor): - - return tf.image.adjust_hue(image, delta=factor) - - -def AdjustSaturation(image, factor): - - return tf.image.adjust_saturation(image, saturation_factor=factor) - - -def Crop(image, offset_height, offset_width, target_height, target_width, is_hwc=True): - ''' - - Parameters - ---------- - image: - A image or a batch of images - offset_height: - Vertical coordinate of the top-left corner of the result in the input. - offset_width: - Horizontal coordinate of the top-left corner of the result in the input. - target_height: - Height of the result. - target_width: - Width of the result. - - Returns: - Output [batch, target_height, target_width, channels] or [target_height, target_width, channels] - ------- - ''' - - return tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width) - - -def FlipHorizontal(image): - - return tf.image.flip_left_right(image) - - -def FlipVertical(image): - - return tf.image.flip_up_down(image) - - -def GrayToRgb(image): - - return tf.image.grayscale_to_rgb(image) - - -def RgbToGray(image): - - return tf.image.rgb_to_grayscale(image) - - -def PadToBoundingbox(image, offset_height, offset_width, target_height, target_width, padding_value=0, is_hwc=True): - - return tf.image.pad_to_bounding_box( - image, - offset_height, - offset_width, - target_height, - target_width, - ) - - -def Pad(image, padding, padding_value=0, mode='constant'): - ''' - - Parameters - ---------- - image: - A 3-D or 4-D Tensor. - padding: - An integer or a list/tuple. If a single number is provided, pad all borders with this value. 
- If a tuple or list of 2 values is provided, pad the left and top with the first value and the right and bottom with the second value. - If 4 values are provided as a list or tuple, pad the (top, bottom, left, right) respectively. - padding_value: - In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor. - mode: - One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive) - Returns: - A padded Tensor. Has the same type as tensor. - ------- - - ''' - image_shape = image.shape - if len(image_shape) == 3: - batch_size = 0 - elif len(image_shape) == 4: - batch_size = image_shape[0] - else: - raise TypeError('Image must be a 3-D tensor or 4-D tensor.') - - if isinstance(padding, int): - padding = ((padding, padding), (padding, padding)) - elif isinstance(padding, list) or isinstance(padding, tuple): - if len(padding) == 2: - padding = ((padding[0], padding[0]), (padding[1], padding[1])) - elif len(padding) == 4: - padding = ((padding[0], padding[1]), (padding[2], padding[3])) - else: - raise ValueError('The length of padding should be 2 or 4, but got {}.'.format(len(padding))) - else: - raise TypeError('Padding should be an integer or a list/tuple, but got {}.'.format(type(padding))) - if batch_size == 0: - padding = (padding[0], padding[1], (0, 0)) - else: - padding = ((0, 0), padding[0], padding[1], (0, 0)) - - return tf.pad(image, padding, mode=mode, constant_values=padding_value) - - -def Standardization(image, mean=None, std=None, channel_mode=False): - ''' - - Parameters - ---------- - image: - An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. - mean: - List or tuple of mean values for each channel, with respect to channel order. - std: - List or tuple of standard deviations for each channel. - channel_mode: - Decide to implement standardization on whole image or each channel of image. - Returns: - A Tensor with the same shape and dtype as image. 
- ------- - ''' - image = tf.cast(image, tf.float32) - with ops.name_scope(None, 'Standardization', [image]) as scope: - image = ops.convert_to_tensor(image, name='image') - image = _AssertAtLeast3DImage(image) - - orig_dtype = image.dtype - if orig_dtype not in [dtypes.float16, dtypes.float32]: - image = convert_image_dtype(image, dtypes.float32) - - if mean is not None and std is not None: - mean = np.array(mean, dtype=np.float32) - std = np.array(std, dtype=np.float32) - image -= mean - image = math_ops.divide(image, std, name=scope) - return convert_image_dtype(image, orig_dtype, saturate=True) - - elif mean is None and std is None: - if channel_mode: - num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:-1]) - #`num_pixels` is the number of elements in each channels of 'image' - image_mean = math_ops.reduce_mean(image, axis=[-2, -3], keepdims=True) - # `image_mean` is the mean of elements in each channels of 'image' - - stddev = math_ops.reduce_std(image, axis=[-2, -3], keepdims=True) - min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype)) - adjusted_sttdev = math_ops.maximum(stddev, min_stddev) - - image -= image_mean - image = math_ops.divide(image, adjusted_sttdev, name=scope) - return convert_image_dtype(image, orig_dtype, saturate=True) - - else: - num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:]) - #`num_pixels` is the number of elements in `image` - image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True) - - # Apply a minimum normalization that protects us against uniform images. 
- stddev = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True) - min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, image.dtype)) - adjusted_stddev = math_ops.maximum(stddev, min_stddev) - - image -= image_mean - image = math_ops.divide(image, adjusted_stddev, name=scope) - return convert_image_dtype(image, orig_dtype, saturate=True) - else: - raise ValueError('std and mean must both be None or not None') - - -def RandomBrightness(image, factor): - ''' - - Parameters - ---------- - image: - An image or images to adjust - factor: - Float, must be non-negative. Factor must be (0,1). Random range will be [-factor, factor). - Returns: - The brightness-adjusted image(s). - ------- - - ''' - - return tf.image.random_brightness(image, factor) - - -def RandomContrast(image, lower, upper, seed=None): - ''' - - Parameters - ---------- - image: - An image tensor with 3 or more dimensions. - lower: - float. Lower bound for the random contrast factor. - upper: - float. Upper bound for the random contrast factor. - seed: - A Python integer. Used to create a random seed. - - Returns: - The contrast-adjusted image(s). - ------- - ''' - - return tf.image.random_contrast(image, lower, upper, seed) - - -def RandomHue(image, factor, seed=None): - ''' - - Parameters - ---------- - image: - RGB image or images. The size of the last dimension must be 3. - factor: - float. The maximum value for the random factor. - seed: - An operation-specific seed. - - Returns: - Adjusted image(s), same shape and DType as `image`. - ------- - - ''' - - return tf.image.random_hue(image, factor, seed) - - -def RandomSaturation(image, lower, upper, seed=None): - ''' - Parameters - ---------- - image: - RGB image or images. The size of the last dimension must be 3. - lower: - float. Lower bound for the random saturation factor. - upper: - float. Upper bound for the random saturation factor. - seed: - An operation-specific seed. - - Returns: - Adjusted image(s), same shape and DType as `image`. 
- ------- - ''' - - return tf.image.random_saturation(image, lower, upper, seed) - - -def RandomCrop(image, size): - ''' - - Parameters - ---------- - image: - Input an image to crop. - size: - a list or tuple. if size is an integer, shape of cropped image will be [size, size, 3]. if length of size is 2. - shape of cropped image will be [height, width, 3]. - Returns: - A cropped image of the same rank as image and shape size. - ------- - ''' - - if isinstance(size, int): - crop_size = (size, size) - elif isinstance(size, (list, tuple)) and len(size) == 2: - crop_size = (size[0], size[1]) - else: - raise ValueError("Size should be a single integer or a list/tuple (h, w) of length 2.") - - if len(image.shape) == 3: - h, w, c = image.shape - crop_size = crop_size + (c, ) - elif len(image.shape) == 4: - b, h, w, c = image.shape - crop_size = (b, ) + crop_size + (c, ) - - return tf.image.random_crop(image, size=crop_size) - - -def Resize(image, size, method='bilinear', preserve_aspect_ratio=False, antialias=False): - ''' - - Parameters - ---------- - images: - Input an image to resize - size: - if size is an integer, shape of resized image will be [size, size, 3]. if length of size is 2. - shape of resized image will be [height, width, 3]. - method: - An image.ResizeMethod, or string equivalent shoulid be in - (bilinear, lanczos3, lanczos5, bicubic, gaussian, nearest, area, mitchellcubic). - Defaults to bilinear. - preserve_aspect_ratio: - Whether to preserve the aspect ratio. - antialias: - Whether to use an anti-aliasing filter when downsampling an image. 
- Returns: - an resized image - ------- - - ''' - if isinstance(size, int): - size = [size, size] - elif len(size) != 2: - raise ValueError('Size should be a single integer or a list/tuple (h, w) of length 2.') - - return tf.image.resize(image, size, method, preserve_aspect_ratio, antialias) - - -def CropAndResize(image, boxes, box_indices, crop_size, method='bilinear', extrapolation_value=0, is_hwc=True): - ''' - - Parameters - ---------- - image: - A 4-D tensor of shape [batch, image_height, image_width, depth]. Both image_height and image_width need to be positive. - boxes: - A 2-D tensor of shape [num_boxes, 4]. - box_indices: - A 1-D tensor of shape [num_boxes] with int32 values in [0,batch). - The value of box_ind[i] specifies the image that the i-th box refers to. - crop_size: - A 1-D tensor of 2 elements, size = [crop_height, crop_width]. All cropped image patches are resized to this size. - The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive. - method: - An optional string specifying the sampling method for resizing. - It can be either "bilinear" or "nearest" and default to "bilinear". - extrapolation_value: - An optional float. Defaults to 0. Value used for extrapolation, when applicable. - Returns: - A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth]. 
- ------- - - ''' - image_shape = image.shape - boxes_num = 0 - if isinstance(boxes, tf.Tensor): - boxes_num = boxes.shape[0] - elif isinstance(boxes, np.ndarray) or isinstance(boxes, list) or isinstance(boxes, tuple): - boxes = tf.constant(boxes) - boxes_num = boxes.shape[0] - - if isinstance(crop_size, int): - crop_size = (crop_size, crop_size) - crop_size = tf.constant(crop_size) - elif isinstance(crop_size, np.ndarray) or isinstance(crop_size, list) or isinstance(crop_size, tuple): - crop_size = tf.constant(crop_size) - - if isinstance(box_indices, np.ndarray) or isinstance(box_indices, list) or isinstance(box_indices, tuple): - box_indices = tf.constant(box_indices) - # if input is an image. - # a 3-D Tensor of shape [image_height, image_width, depth] should use 'tf.expand_dims(image, axis = 0)' - # to convert input to a 4-D Tensor of shape [batch_size,image_height, image_width, depth] - if len(image_shape) == 3: - image = tf.expand_dims(image, axis=0) - box_indices = np.zeros((boxes_num), dtype=np.int) - box_indices = tf.constant(box_indices) - - return tf.image.crop_and_resize( - image, boxes=boxes, box_indices=box_indices, crop_size=crop_size, method=method, - extrapolation_value=extrapolation_value - ) - - -def CropOrPad(image, target_height, target_width, is_hwc=True): - ''' - Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros. - Parameters - ---------- - image: - 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. - target_height: - Target height. - target_width: - Target width. - Returns: - Cropped and/or padded image. 
- ------- - ''' - - return tf.image.resize_with_crop_or_pad(image, target_height, target_width) - - -def ResizeAndPad(image, target_height, target_width, method='bilinear', antialias=False, is_hwc=True): - ''' - - Parameters - ---------- - image: - 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. - target_height: - Target height. - target_width: - Target height. - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - Returns: - Resized and padded image. If images was 4-D, a 4-D float Tensor of shape [batch, new_height, new_width, channels]. - If images was 3-D, a 3-D float Tensor of shape [new_height, new_width, channels]. - ------- - - ''' - - return tf.image.resize_with_pad(image, target_height, target_width, method=method, antialias=antialias) - - -def RgbToHsv(image): - - return tf.image.rgb_to_hsv(image) - - -def Transpose(image, order): - image = ops.convert_to_tensor(image) - image = _AssertAtLeast3DImage(image) - shape = image.get_shape() - if shape.ndims == 3 or shape.ndims is None: - if len(order) != 3: - raise ValueError('if image is 3-D tensor, order should be a list/tuple with length of 3') - return array_ops.transpose(image, order) - elif shape.ndims == 4: - if len(order) != 4: - raise ValueError('if image is 4-D tensor, order should be a list/tuple with length of 4') - return array_ops.transpose(image, order) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') - - -def RandomRotation( - image, degrees, fill_mode='nearest', fill_value=0, center=None, expand=False, is_hwc=True, interpolation_order=1 -): - if isinstance(image, tf.Tensor): - image = np.asarray(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - if is_hwc: - h, w, c = 0, 1, 2 - else: - h, w, c = 1, 2, 0 - if fill_mode not in ('constant', 'nearest', 'reflect', 'wrap'): - raise TypeError('fill_mode must be in (constant, nearest, reflect, wrap)') - - image = tf.keras.preprocessing.image.random_rotation( - image, degrees, h, w, c, fill_mode, fill_value, interpolation_order - ) - return tf.convert_to_tensor(image) - - -def RandomShift(image, shift, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image - Input tensor. Must be 3D. - shift: - int or list/tuple, if shift is int, Width shift range will equal to height shift range. - if shift is list/tuple, shift range will be [width fraction, height fraction] - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order - int, order of spline interpolation. see ndimage.interpolation.affine_transform - Returns - Shifted Numpy image tensor. - ------- - - ''' - if isinstance(image, tf.Tensor): - image = np.asarray(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - if isinstance(shift, numbers.Number): - width_fraction = shift - height_fraction = shift - elif isinstance(shift, list) or isinstance(shift, tuple): - if len(shift) == 2: - width_fraction = shift[0] - height_fraction = shift[1] - else: - raise ValueError('shift must be number or list/tuple of length 2') - - if is_hwc: - h, w, c = 0, 1, 2 - else: - h, w, c = 1, 2, 0 - if fill_mode not in ('constant', 'nearest', 'reflect', 'wrap'): - raise TypeError('fill_mode must be in (constant, nearest, reflect, wrap)') - - image = tf.keras.preprocessing.image.random_shift( - image, wrg=width_fraction, hrg=height_fraction, row_axis=h, col_axis=w, channel_axis=c, fill_mode=fill_mode, - cval=fill_value, interpolation_order=interpolation_order - ) - - return tf.convert_to_tensor(image) - - -def RandomShear(image, degree, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image - Input tensor. Must be 3D. - degree: - Transformation intensity in degrees. - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order - int, order of spline interpolation. see ndimage.interpolation.affine_transform - Returns - Shifted Numpy image tensor. - ------- - - ''' - if isinstance(image, tf.Tensor): - image = np.asarray(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - if is_hwc: - h, w, c = 0, 1, 2 - else: - h, w, c = 1, 2, 0 - - image = tf.keras.preprocessing.image.random_shear( - image, intensity=degree, row_axis=h, col_axis=w, channel_axis=c, fill_mode=fill_mode, cval=fill_value, - interpolation_order=interpolation_order - ) - return tf.convert_to_tensor(image) - - -def RandomZoom(image, zoom_range, fill_mode='nearest', fill_value=0, is_hwc=True, interpolation_order=1): - ''' - - Parameters - ---------- - image: - Input tensor. Must be 3D. - zoom_range: - Tuple of floats; zoom range for width and height. - is_hwc: - The flag of image shape, (H, W, C) or (N, H, W, C) if True and (C, H, W) or (N, C, H, W) if False (default=True). - fill_mode: - Points outside the boundaries of the input are filled according to the given mode (one of {'constant', 'nearest', 'reflect', 'wrap'}). - fill_value: - Value used for points outside the boundaries of the input if mode='constant'. - interpolation_order: - int, order of spline interpolation. see ndimage.interpolation.affine_transform - - Returns - Zoomed Numpy image tensor. - ------- - - ''' - if isinstance(image, tf.Tensor): - image = np.asarray(image) - if not isinstance(image, np.ndarray): - raise TypeError('img should be NumPy image. 
Got {}'.format(type(image))) - if isinstance(zoom_range, numbers.Number): - zoom_range = (zoom_range, zoom_range) - elif isinstance(zoom_range, list) or isinstance(zoom_range, tuple): - if len(zoom_range) == 2: - zoom_range = (zoom_range[0], zoom_range[1]) - else: - raise ValueError('shift must be number or list/tuple of length 2') - if is_hwc: - h, w, c = 0, 1, 2 - else: - h, w, c = 1, 2, 0 - - image = tf.keras.preprocessing.image.random_zoom( - image, zoom_range=zoom_range, row_axis=h, col_axis=w, channel_axis=c, fill_mode=fill_mode, cval=fill_value, - interpolation_order=interpolation_order - ) - return tf.convert_to_tensor(image) - - -def Rescale(image, scale, offset=0): - ''' - - Parameters - ---------- - image: - 3-D image or 4-D images - scale: - Float, the scale to apply to the inputs. - offset: - Float, the offset to apply to the inputs. - Returns: - rescaled images - ------- - ''' - image = tf.cast(image, dtype=tf.float32) - scale = tf.cast(scale, dtype=tf.float32) - offset = tf.cast(offset, dtype=tf.float32) - return image * scale + offset - - -def RandomFlipVertical(image): - - return tf.image.random_flip_up_down(image) - - -def RandomFlipHorizontal(image): - - return tf.image.random_flip_left_right(image) - - -def HWC2CHW(image): - - if (len(image.shape) == 3): - return Transpose(image, (2, 0, 1)) - elif (len(image.shape) == 4): - return Transpose(image, (0, 3, 1, 2)) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') - - -def CHW2HWC(image): - - if (len(image.shape) == 3): - return Transpose(image, (1, 2, 0)) - elif (len(image.shape) == 4): - return Transpose(image, (0, 2, 3, 1)) - else: - raise ValueError('\'image\' must have either 3 or 4 dimensions.') diff --git a/tensorlayer/dataflow/mindspore_data.py b/tensorlayer/dataflow/mindspore_data.py index 54e275f..9c12d87 100644 --- a/tensorlayer/dataflow/mindspore_data.py +++ b/tensorlayer/dataflow/mindspore_data.py @@ -5,131 +5,78 @@ import mindspore.dataset as ds import 
mindspore as ms from enum import Enum __all__ = [ - 'Apply', 'Batch', 'Concat', - 'CsvDataset', - 'Filter', - 'Flat_map', 'FromGenerator', 'FromSlices', 'Map', - 'Prefetch', 'Repeat', 'Shuffle', - 'Skip', - 'Take', - 'TextFlieDataset', - 'TFRecordDataset', 'Dataloader', + 'Dataset', + 'IterableDataset', ] -class Shuffle(str, Enum): - GLOBAL: str = "global" - FILES: str = "file" +class Dataset(object): + def __init__(self): + pass -def Apply(dataset, transformation_func): + def __getitem__(self, idx): + raise NotImplementedError("'{}' not implement in class "\ + "{}".format('__getitem__', self.__class__.__name__)) - return dataset.apply(transformation_func) + def __len__(self): + raise NotImplementedError("'{}' not implement in class "\ + "{}".format('__len__', self.__class__.__name__)) -def Batch( - dataset, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None, inut_columns=None, - output_columns=None, column_order=None, pad_info=None -): +class IterableDataset(object): + + def __init__(self): + pass + + def __iter__(self): + raise NotImplementedError("'{}' not implement in class " \ + "{}".format('__iter__', self.__class__.__name__)) + + +def Batch(dataset, batch_size, drop_last=False): ''' - Combine batch_size number of consecutive rows into batches. 
+ Parameters ---------- dataset batch_size - drop_remainder - num_parallel_workers - per_batch_map - inut_columns - output_columns - column_order - pad_info - + drop_last Returns ------- ''' - return dataset.batch( - batch_size=batch_size, drop_remainder=drop_remainder, num_parallel_workers=num_parallel_workers, - per_batch_map=per_batch_map, input_columns=inut_columns, output_columns=output_columns, - column_order=column_order, pad_info=pad_info - ) - + return dataset.batch(batch_size=batch_size, drop_remainder=drop_last) -def Concat(dataset_1, dataset_2): - - return dataset_1.concat(dataset_2) - - -def CsvDataset( - file_pattern, batch_size=1, column_names=None, column_defaults=None, label_name=None, select_columns=None, - field_delim=',', use_quote_delim=True, na_value='', header=True, num_epochs=None, shuffle=Shuffle.GLOBAL, - shuffle_buffer_size=10000, shuffle_seed=None, prefetch_buffer_size=None, num_parallel_reads=None, sloppy=False, - num_rows_for_inference=100, compression_type=None, ignore_errors=False, numples_samples=None, num_shards=None, - shard_id=None, cache=None -): - """ - A source dataset that reads and parses comma-separated values (CSV) datasets. 
- - Examples: - >>> import mindspore.dataset as dataset - >>> - >>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple text files - >>> dataset = dataset.CSVDataset(dataset_files=dataset_files, column_names=['col1', 'col2', 'col3', 'col4']) - """ - return ds.CSVDataset( - dataset_files=file_pattern, field_delim=field_delim, column_defaults=column_defaults, column_names=column_names, - num_samples=numples_samples, num_parallel_workers=num_parallel_reads, shuffle=shuffle, num_shards=num_shards, - shard_id=shard_id, cache=cache - ) +def Concat(datasets): -def Filter(dataset, predicate): - - return dataset.filter(predicate) - - -def Flat_map(dataset, map_func): - - return dataset.flat_map(map_func) + datasets = list(datasets) + dataset = ds.Dataset.concat(datasets) + return dataset -def FromGenerator( - generator, output_types, output_shapes=None, args=None, column_names=None, column_types=None, schema=None, - num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, shard_id=None, - python_multiprocessing=True -): +def FromGenerator(generator, output_types, column_names): - return ds.GeneratorDataset( - source=generator, column_names=column_names, column_types=column_types, schema=schema, num_samples=num_samples, - num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler, num_shards=num_shards, - shard_id=shard_id, python_multiprocessing=python_multiprocessing - ) + output_types = list(output_types) + column_names = list(column_names) + return ds.GeneratorDataset(source=generator, column_names=column_names, column_types=output_types) -def FromSlices( - tensor, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, - shard_id=None -): +def FromSlices(datas, column_names): - return ds.NumpySlicesDataset( - data=tensor, column_names=column_names, num_samples=num_samples, num_parallel_workers=num_parallel_workers, - shuffle=shuffle, sampler=sampler, 
num_shards=num_shards, shard_id=shard_id - ) + return ds.NumpySlicesDataset(data=datas, column_names=column_names) -def Map( - dataset, map_func, num_parallel_calls=None, input_columns=None, output_columns=None, column_order=None, - num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None -): +def Map(dataset, map_func, input_columns=None): """ Maps map_func across the elements of this dataset. Parameters @@ -144,19 +91,7 @@ def Map( ------- """ - return dataset.map( - operations=map_func, input_columns=input_columns, output_columns=output_columns, column_order=column_order, - num_parallel_workers=num_parallel_workers, python_multiprocessing=python_multiprocessing, cache=cache, - callbacks=callbacks - ) - - -def Prefetch(dataset, buffer_size): - - batch_size = dataset.get_batch_size() - prefetch_size = batch_size * buffer_size - - return dataset.config.set_prefetch_size(prefetch_size) + return dataset.map(operations=map_func, input_columns=input_columns) def Repeat(dataset, count=None): @@ -164,104 +99,11 @@ def Repeat(dataset, count=None): return dataset.repeat(count) -def Shuffle(dataset, buffer_size, seed=None, reshuffle_each_iteration=None): - - #dataset.config.set_seed(seed) +def Shuffle(dataset, buffer_size): return dataset.shuffle(buffer_size) -def Skip(dataset, count): - ''' - Creates a Dataset that skips count elements from this dataset. - Parameters - ---------- - dataset: - A dataset - count: - A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be skipped to form the new dataset. - - - Returns - ------- - - ''' - return dataset.skip(count) - - -def Take(dataset, count): - ''' - Creates a Dataset with at most count elements from this dataset. - Parameters - ---------- - dataset: - A dataset - count: - A tf.int64 scalar tf.Tensor, representing the number of elements of this dataset that should be taken to form the new dataset. 
- If count is -1, or if count is greater than the size of this dataset, the new dataset will contain all elements of this dataset. - Returns - ------- - - ''' - return dataset.take(count) - - -def TextFlieDataset( - filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, num_samples=None, shuffle=None, - num_shards=None, shard_id=None, cache=None -): - """ - A source dataset that reads and parses datasets stored on disk in text format. - The generated dataset has one column ['text']. - - Examples: - >>> import mindspore.dataset as dataset - >>> - >>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple text files - >>> dataset = dataset.TextFileDataset(dataset_files=dataset_files) - """ - if shuffle is None: - shuffle = Shuffle.GLOBAL - return ds.TextFileDataset( - dataset_files=filenames, num_samples=num_samples, num_parallel_workers=num_parallel_reads, shuffle=shuffle, - num_shards=num_shards, shard_id=shard_id, cache=cache - ) - - -def TFRecordDataset( - filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, schema=None, columns_list=None, - num_samples=None, shuffle=None, num_shards=None, shard_id=None, shard_equal_rows=False, cache=None -): - """ - A source dataset that reads and parses datasets stored on disk in TFData format. - - Examples: - >>> import mindspore.dataset as dataset - >>> import mindspore.common.dtype as mstype - >>> - >>> dataset_files = ["/path/to/1", "/path/to/2"] # contains 1 or multiple tf data files - >>> - >>> # 1) Get all rows from dataset_files with no explicit schema - >>> # The meta-data in the first row will be used as a schema. 
- >>> tfdataset = dataset.TFRecordDataset(dataset_files=dataset_files) - >>> - >>> # 2) Get all rows from dataset_files with user-defined schema - >>> schema = dataset.Schema() - >>> schema.add_column('col_1d', de_type=mindspore.int64, shape=[2]) - >>> tfdataset = dataset.TFRecordDataset(dataset_files=dataset_files, schema=schema) - >>> - >>> # 3) Get all rows from dataset_files with schema file "./schema.json" - >>> tfdataset = dataset.TFRecordDataset(dataset_files=dataset_files, schema="./schema.json") - """ - if shuffle is None: - shuffle = Shuffle.GLOBAL - return ds.TFRecordDataset( - dataset_files=filenames, schema=schema, columns_list=columns_list, num_samples=num_samples, - num_parallel_workers=num_parallel_reads, shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, - shard_equal_rows=shard_equal_rows, cache=cache - ) - - def Zip(datasets): ''' Creates a Dataset by zipping together the given datasets. @@ -273,15 +115,14 @@ def Zip(datasets): ------- ''' + datasets = tuple(datasets) return ds.zip(datasets) -def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=0): +def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, shuffle_buffer_size=10000): if shuffle: dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size) - - dataset = Batch(dataset, batch_size=batch_size, drop_remainder=drop_last) - dataset = Prefetch(dataset, buffer_size=prefetch) + dataset = Batch(dataset, batch_size=batch_size, drop_last=drop_last) return dataset diff --git a/tensorlayer/dataflow/paddle_data.py b/tensorlayer/dataflow/paddle_data.py index d001d56..d442a8f 100644 --- a/tensorlayer/dataflow/paddle_data.py +++ b/tensorlayer/dataflow/paddle_data.py @@ -3,129 +3,96 @@ import numpy as np import paddle -from paddle.io import Dataset, BatchSampler, DataLoader, IterableDataset +from paddle.io import Dataset as dataset +from paddle.io import IterableDataset as iterabledataset +from paddle.io import DataLoader __all__ = [ + 
'Batch', 'Concat', 'FromGenerator', 'FromSlices', 'Map', - # 'Shuffle', - # 'Batch', + 'Repeat', + 'Shuffle', 'Dataloader', + 'Dataset', + 'IterableDataset', ] -def to_list(value): - if value is None: - return value - if isinstance(value, (list, tuple)): - return list(value) - return [value] +class Dataset(dataset): + def __init__(self): + pass -class FromGenerator(Dataset): + def __getitem__(self, idx): + raise NotImplementedError("'{}' not implement in class "\ + "{}".format('__getitem__', self.__class__.__name__)) - def __init__(self, generator): + def __len__(self): + raise NotImplementedError("'{}' not implement in class "\ + "{}".format('__len__', self.__class__.__name__)) - if not callable(generator): - raise TypeError("'generator' must be callable") - self.generator = generator() - self.datas = [] - self.labels = [] - for data, label in self.generator: - self.datas.append(data) - self.labels.append(label) - def __getitem__(self, idx): +class IterableDataset(iterabledataset): - x = self.datas[idx] - y = self.labels[idx] + def __init__(self): + pass + + def __iter__(self): + raise NotImplementedError("'{}' not implement in class "\ + "{}".format('__iter__', self.__class__.__name__)) - return x, y + def __getitem__(self, idx): + raise RuntimeError("'{}' should not be called for IterableDataset" \ + "{}".format('__getitem__', self.__class__.__name__)) def __len__(self): + raise RuntimeError("'{}' should not be called for IterableDataset" \ + "{}".format('__len__', self.__class__.__name__)) - return self.datas.shape[0] +def FromGenerator(generator, output_types=None, column_names=None): -class FromSlices(Dataset): + return generator - def __init__(self, datas, transform = None): - self.datas = datas[0] - self.labels = datas[1] - self.transform = transform - if len(self.datas) != len(self.labels): - raise ValueError('Datas and labels not have same shape of the 1st dimension.') +def FromSlices(datas, column_names=None): - def __getitem__(self, idx): - data = 
paddle.to_tensor(self.datas[idx], dtype='float32') - label = paddle.to_tensor(self.labels[idx], dtype='int64') - if self.transform is not None: - data = self.transform(data) - return data, label + datas = list(datas) + return paddle.io.TensorDataset(datas) - def __len__(self): - return len(self.datas) +def Concat(datasets): + return paddle.io.ChainDataset(list(datasets)) -class Concat(IterableDataset): - def __init__(self, datasets): - self.datasets = list(datasets) - assert len(self.datasets) > 0, "input datasets shoule not be empty" - for i, dataset in enumerate(self.datasets): - assert isinstance(dataset, IterableDataset), \ - "ChainDataset only support paddle.io.IterableDataset" +def Zip(datasets): - def __iter__(self): - for dataset in self.datasets: - for sample in dataset: - yield sample - - -class Map(Dataset): - - def __init__(self, dataset, transform): - self.isDataset = False - self.transform = transform - if isinstance(dataset, Dataset): - self.isDataset = True - self.dataset = dataset - elif isinstance(dataset, list) or isinstance(dataset, tuple): - self.datas = dataset[0] - self.labels = dataset[1] - else: - raise TypeError( - " 'dataset' should be subclass instance of paddle.io.Dataset " - "or a [data, label] list/tulpe, not a {}".format(type(dataset)) - ) + return paddle.io.ComposeDataset(list(datasets)) - def __getitem__(self, idx): - if self.isDataset: - x = self.dataset[idx][0] - if not isinstance(x, np.ndarray): - x = np.asarray(x) - x = self.transform(x) - y = self.dataset[idx][1] - else: - x = self.datas[idx] - if not isinstance(x, np.ndarray): - x = np.asarray(x) - x = self.transform(x) - y = self.labels[idx] - - return x, y - def __len__(self): +def Dataloader(dataset, batch_size=None, shuffle=False, drop_last=False, shuffle_buffer_size=0): + + return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last, return_list=True) + + +def Batch(dataset, batch_size, drop_last=False): + + raise NotImplementedError('This 
function not implement in paddle backend.') + + +def Shuffle(dataset, buffer_size, seed=None): + + raise NotImplementedError('This function not implement in paddle backend.') + + +def Repeat(dataset, count=None): - if self.isDataset: - return len(self.dataset[0]) - else: - return len(self.datas) + raise NotImplementedError('This function not implement in paddle backend.') -def Dataloader(dataset, batch_size=None, shuffle=False, drop_last=False, prefetch=0, shuffle_buffer_size=0): +def Map(dataset, map_func, input_columns=None): - return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last) + raise NotImplementedError('This function not implement in paddle backend.') diff --git a/tensorlayer/dataflow/tensorflow_data.py b/tensorlayer/dataflow/tensorflow_data.py index 4da229a..3125650 100644 --- a/tensorlayer/dataflow/tensorflow_data.py +++ b/tensorlayer/dataflow/tensorflow_data.py @@ -2,265 +2,357 @@ # -*- coding: utf-8 -*- import tensorflow as tf - +import tensorlayer as tl +import numpy as np __all__ = [ - 'Apply', 'Batch', 'Concat', - 'CsvDataset', - 'Filter', - 'Flat_map', 'FromGenerator', 'FromSlices', 'Map', - 'Prefetch', 'Repeat', 'Shuffle', - 'Skip', - 'Take', - 'TextFlieDataset', - 'TFRecordDataset', 'Zip', 'Dataloader', + 'Dataset', + 'IterableDataset', ] -def Apply(dataset, transformation_func): - """Applies a transformation function to this dataset. - `apply` enables chaining of custom `Dataset` transformations, which are - represented as functions that take one `Dataset` argument and return a - transformed `Dataset`. - >>> dataset = tf.data.Dataset.range(100) - >>> def dataset_fn(dataset): - ... return dataset.filter(lambda x: x < 5) - >>> dataset = dataset.apply(dataset_fn) - >>> list(dataset.as_numpy_iterator()) - [0, 1, 2, 3, 4] - Args: - transformation_func: A function that takes one `Dataset` argument and - returns a `Dataset`. 
- Returns: - Dataset: The `Dataset` returned by applying `transformation_func` to this - dataset. - """ - return dataset.apply(transformation_func) - - -def Batch(dataset, batch_size, drop_remainder=False): - ''' +class Dataset(object): + """An abstract class to encapsulate methods and behaviors of datasets. + All datasets in map-style(dataset samples can be get by a given key) should be a subclass of 'tensorlayer.dataflow.Dataset'. + ALl subclasses should implement following methods: + :code:`__getitem__`: get sample from dataset with a given index. + :code:`__len__`: return dataset sample number. - Parameters + Examples ---------- - dataset - batch_size - drop_remainder + With TensorLayer + + >>> from tensorlayer.dataflow import Dataset + >>> class mnistdataset(Dataset): + >>> def __init__(self, data, label,transform): + >>> self.data = data + >>> self.label = label + >>> self.transform = transform + >>> def __getitem__(self, index): + >>> data = self.data[index].astype('float32') + >>> data = self.transform(data) + >>> label = self.label[index].astype('int64') + >>> return data, label + >>> def __len__(self): + >>> return len(self.data) + >>> train_dataset = mnistdataset(data = X_train, label = y_train ,transform = transform) - Returns - ------- + """ - ''' - return dataset.batch(batch_size=batch_size, drop_remainder=drop_remainder) + def __init__(self): + pass + def __call__(self): -def Concat(dataset_1, dataset_2): + return self - return dataset_1.concatenate(dataset_2) + def __getitem__(self, idx): + raise NotImplementedError("'{}' not implement in class "\ + "{}".format('__getitem__', self.__class__.__name__)) + def __len__(self): + raise NotImplementedError("'{}' not implement in class "\ + "{}".format('__len__', self.__class__.__name__)) + + +class IterableDataset(object): + """An abstract class to encapsulate methods and behaviors of iterable datasets. 
+ All datasets in iterable-style (can only get sample one by one sequentially, likea Python iterator) should be a subclass of `tensorlayer.dataflow.IterableDataset`. + All subclasses should implement following methods: + :code:`__iter__`: yield sample sequentially. + + Examples + ---------- + With TensorLayer + + >>> class mnistdataset(IterableDataset): + >>> def __init__(self, data, label,transform): + >>> self.data = data + >>> self.label = label + >>> self.transform = transform + >>> def __iter__(self): + >>> for i in range(len(self.data)): + >>> data = self.data[i].astype('float32') + >>> data = self.transform(data) + >>> label = self.label[i].astype('int64') + >>> yield data, label + >>> train_dataset = mnistdataset(data = X_train, label = y_train ,transform = transform) -def CsvDataset( - file_pattern, batch_size=1, column_names=None, column_defaults=None, label_name=None, select_columns=None, - field_delim=',', use_quote_delim=True, na_value='', header=True, num_epochs=None, shuffle=True, - shuffle_buffer_size=10000, shuffle_seed=None, prefetch_buffer_size=None, num_parallel_reads=None, sloppy=False, - num_rows_for_inference=100, compression_type=None, ignore_errors=False, numples_samples=None, num_shards=None, - shard_id=None, cache=None -): - """Reads CSV files into a dataset. - Reads CSV files into a dataset, where each element is a (features, labels) - tuple that corresponds to a batch of CSV rows. The features dictionary - maps feature column names to `Tensor`s containing the corresponding - feature data, and labels is a `Tensor` containing the batch's label data. 
def FromGenerator(generator, output_types, column_names=None):
    """Creates a `Dataset` whose elements are generated by `generator`.

    Parameters
    ----------
    generator : Callable or Iterable
        A generator callable object or an iterable Python object.
    output_types : list or tuple
        Output data types. This parameter is not supported in the MindSpore and Paddle backends.
    column_names : list or tuple
        Column names of the dataset. This parameter is not supported in the TensorFlow and Paddle backends.

    Returns
    -------
    Dataset
        A Dataset.

    Examples
    --------
    With TensorLayer

    >>> train_dataset = mnistdataset(data=X_train, label=y_train, transform=transform)
    >>> train_dataset = tl.dataflow.FromGenerator(train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label'])

    """
    # `column_names` is accepted for cross-backend signature compatibility only;
    # the TensorFlow backend identifies columns positionally, so it is unused here.
    output_types = tuple(output_types)
    return tf.data.Dataset.from_generator(generator, output_types=output_types)


def Batch(dataset, batch_size, drop_last=False):
    """Combine ``batch_size`` consecutive elements of a dataset into batches.
    This function is not implemented in the Paddle backend.

    Parameters
    ----------
    dataset : Dataset
        A dataset.
    batch_size : int
        Sample number in a mini-batch.
    drop_last : boolean
        Whether to drop the last incomplete batch when the dataset size is not
        divisible by the batch size.

    Returns
    -------
    Dataset
        A batched Dataset.

    """
    return dataset.batch(batch_size=batch_size, drop_remainder=drop_last)


def Concat(datasets):
    """Concatenate the datasets in the input list of datasets.

    Parameters
    ----------
    datasets : list of Dataset
        A list of datasets to concatenate, in order.

    Returns
    -------
    Dataset
        The datasets concatenated into a single dataset.

    Examples
    --------
    With TensorLayer

    >>> dataset = tl.dataflow.Concat([dataset1, dataset2])

    """
    concatenated = datasets[0]
    for ds in datasets[1:]:
        # `concatenate` returns a NEW dataset, so its result must be kept.
        # (The previous implementation discarded it and returned only datasets[0].)
        concatenated = concatenated.concatenate(ds)
    return concatenated


def FromSlices(datas, column_names=None):
    """Creates a dataset with the given data slices.

    Parameters
    ----------
    datas : list or tuple
        Each data should be in shape of [N, ...], where N is the sample number.
        Input data will be sliced along the first dimension to generate rows.
    column_names : list
        List of column names of the dataset. This parameter is not supported in
        the TensorFlow and Paddle backends.

    Returns
    -------
    Dataset
        A dataset.

    Examples
    --------
    With TensorLayer

    >>> dataset = tl.dataflow.FromSlices([data1, data2])

    """
    return tf.data.Dataset.from_tensor_slices(datas)


def Map(dataset, map_func, input_columns=None):
    """Maps ``map_func`` across the elements of this dataset.
    This function is not implemented in the Paddle backend.

    Parameters
    ----------
    dataset : Dataset
        A dataset to map.
    map_func : function
        A function mapping a dataset element to another dataset element.
    input_columns : list
        List of column names of the dataset to map. This parameter is not
        supported in the TensorFlow backend.

    Returns
    -------
    Dataset
        A mapped dataset.

    Examples
    --------
    With TensorLayer

    >>> dataset = tl.dataflow.Map(dataset, map_func)

    """
    return dataset.map(map_func)


def Repeat(dataset, count=None):
    """Repeat this dataset ``count`` times. This function is not implemented in
    the Paddle backend.

    Parameters
    ----------
    dataset : Dataset
        A dataset to repeat.
    count : int
        The number of times the dataset should be repeated. The default
        behavior (if ``count`` is None or -1) is to repeat indefinitely.

    Returns
    -------
    Dataset
        A repeated dataset.

    Examples
    --------
    With TensorLayer

    >>> dataset = tl.dataflow.Repeat(dataset, 2)

    """
    return dataset.repeat(count=count)


def Shuffle(dataset, buffer_size):
    """Randomly shuffles the elements of this dataset.
    This function is not implemented in the Paddle backend.

    Parameters
    ----------
    dataset : Dataset
        A dataset to shuffle.
    buffer_size : int
        The number of elements from this dataset from which the new dataset
        will sample.

    Returns
    -------
    Dataset
        A shuffled dataset.

    Examples
    --------
    With TensorLayer

    >>> dataset = tl.dataflow.Shuffle(dataset, 2000)

    """
    # Reshuffle on every epoch; this wrapper does not expose a fixed seed.
    return dataset.shuffle(buffer_size, seed=None, reshuffle_each_iteration=True)


def Zip(datasets):
    """Creates a Dataset by zipping together the given datasets.
    This function is not implemented in the Paddle backend.

    Parameters
    ----------
    datasets : list
        A list of datasets to zip.

    Returns
    -------
    Dataset
        A zipped dataset.

    Examples
    --------
    With TensorLayer

    >>> dataset = tl.dataflow.Zip([dataset1, dataset2])

    """
    return tf.data.Dataset.zip(datasets)


def Dataloader(dataset, batch_size, shuffle=False, drop_last=False, shuffle_buffer_size=10000):
    """Creates a dataloader to train a network. We recommend using this function.

    Parameters
    ----------
    dataset : Dataset
        The dataset to load data from.
    batch_size : int or None
        Sample number in a mini-batch.
    shuffle : boolean
        Whether to shuffle the element order before generating batches.
    drop_last : boolean
        Whether to drop the last incomplete batch when the dataset size is not
        divisible by the batch size.
    shuffle_buffer_size : int
        The number of elements from this dataset from which the new dataset
        will sample. This parameter is not supported in the Paddle backend.

    Returns
    -------
    DataLoader
        An iterable object for data iterating; each element of the generated
        data is a Tensor.

    Examples
    --------
    With TensorLayer

    >>> from tensorlayer.dataflow import Dataset
    >>> class mnistdataset(Dataset):
    >>>     def __init__(self, data, label, transform):
    >>>         self.data = data
    >>>         self.label = label
    >>>         self.transform = transform
    >>>     def __getitem__(self, index):
    >>>         data = self.data[index].astype('float32')
    >>>         data = self.transform(data)
    >>>         label = self.label[index].astype('int64')
    >>>         return data, label
    >>>     def __len__(self):
    >>>         return len(self.data)
    >>> train_dataset = mnistdataset(data=X_train, label=y_train, transform=transform)
    >>> train_dataset = tl.dataflow.FromGenerator(train_dataset, output_types=[tl.float32, tl.int64], column_names=['data', 'label'])
    >>> train_dataloader = tl.dataflow.Dataloader(train_dataset, batch_size=128, shuffle=True, drop_last=False, shuffle_buffer_size=2000)

    """
    if shuffle:
        dataset = Shuffle(dataset, buffer_size=shuffle_buffer_size)
    dataset = Batch(dataset, batch_size=batch_size, drop_last=drop_last)
    # Overlap input preprocessing with model execution.
    dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return dataset
for v in var_list] + return results + + def assign_tf_variable(variable, value): """Assign value to a TF variable""" variable.assign(value) @@ -2615,6 +2635,10 @@ def assign_ms_variable(variable, value): Assign()(variable, value) +def assign_pd_variable(variable, value): + pd.assign(value, variable) + + def _save_weights_to_hdf5_group(f, layers): """ Save layer/model weights into hdf5 group recursively. diff --git a/tensorlayer/initializers/__init__.py b/tensorlayer/initializers/__init__.py index 80557bd..ef8c65f 100644 --- a/tensorlayer/initializers/__init__.py +++ b/tensorlayer/initializers/__init__.py @@ -5,7 +5,7 @@ # 'Initializer', 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal', # 'deconv2d_bilinear_upsampling_initializer', 'He_Normal' # ] - +from .load_initializers_backend import Initializer from .load_initializers_backend import Zeros from .load_initializers_backend import Ones from .load_initializers_backend import Constant @@ -22,4 +22,4 @@ constant = Constant random_uniform = RandomUniform random_normal = RandomNormal truncated_normal = TruncatedNormal -he_normal = HeNormal \ No newline at end of file +he_normal = HeNormal diff --git a/tensorlayer/initializers/load_initializers_backend.py b/tensorlayer/initializers/load_initializers_backend.py index fc65bab..3f5492d 100644 --- a/tensorlayer/initializers/load_initializers_backend.py +++ b/tensorlayer/initializers/load_initializers_backend.py @@ -7,9 +7,7 @@ from tensorlayer.backend.ops.load_backend import BACKEND if BACKEND == 'tensorflow': from .tensorflow_initializers import * elif BACKEND == 'mindspore': - from .tensorflow_initializers import * -elif BACKEND == 'dragon': - from .tensorflow_initializers import * + from .mindspore_initializers import * elif BACKEND == 'paddle': from .paddle_initializers import * else: diff --git a/tensorlayer/initializers/mindspore_initializers.py b/tensorlayer/initializers/mindspore_initializers.py new file mode 100644 index 
#! /usr/bin/python
# -*- coding: utf-8 -*-

import numpy as np
import tensorlayer as tl
from mindspore import Tensor
from mindspore.common import initializer

__all__ = [
    'Initializer', 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal',
    'deconv2d_bilinear_upsampling_initializer', 'HeNormal'
]


class Initializer(object):
    """Initializer base class: all initializers inherit from this class."""

    def __call__(self, shape, dtype=None):
        """Returns a tensor object initialized as specified by the initializer.

        Parameters
        ----------
        shape : tuple of int.
            The shape of the tensor.
        dtype : Optional dtype of the tensor.
            If not provided will return tensor of `tl.float32`.

        Returns
        -------

        """
        raise NotImplementedError

    def get_config(self):
        """Returns the configuration of the initializer as a JSON-serializable dict.

        Returns
        -------
        A JSON-serializable Python dict.
        """
        return {}

    @classmethod
    def from_config(cls, config):
        """Instantiates an initializer from a configuration dictionary.

        Parameters
        ----------
        config : A python dictionary.
            It will typically be the output of `get_config`.

        Returns
        -------
        An Initializer instance.
        """
        if 'dtype' in config:
            # `dtype` may be stored in a config but is not a constructor argument.
            config.pop('dtype')
        return cls(**config)


class Zeros(Initializer):
    """Initializer that generates tensors initialized to 0."""

    def __init__(self):
        self.zero = initializer.Zero()

    def __call__(self, shape, dtype=tl.float32):
        # MindSpore initializers fill a pre-allocated numpy array in place.
        arr = np.ndarray(shape)
        self.zero(arr)
        return Tensor(arr, dtype=dtype)


class Ones(Initializer):
    """Initializer that generates tensors initialized to 1."""

    def __init__(self):
        self.one = initializer.One()

    def __call__(self, shape, dtype=tl.float32):
        # MindSpore initializers fill a pre-allocated numpy array in place.
        arr = np.ndarray(shape)
        self.one(arr)
        return Tensor(arr, dtype=dtype)


class Constant(Initializer):
    """Initializer that generates tensors initialized to a constant value.

    Parameters
    ----------
    value : A python scalar or a numpy array.
        The assigned value.

    """

    def __init__(self, value=0):
        self.value = value
        self.constant = initializer.Constant(value=value)

    def __call__(self, shape, dtype=tl.float32):
        # MindSpore initializers fill a pre-allocated numpy array in place.
        arr = np.ndarray(shape)
        self.constant(arr)
        return Tensor(arr, dtype=dtype)

    def get_config(self):
        return {"value": self.value}


class RandomUniform(Initializer):
    """Initializer that generates tensors with a uniform distribution.

    Parameters
    ----------
    minval : A python scalar or a scalar tensor.
        Lower bound of the range of random values to generate.
    maxval : A python scalar or a scalar tensor.
        Upper bound of the range of random values to generate.
    seed : A Python integer.
        Used to seed the random generator.

    """

    def __init__(self, minval=-0.05, maxval=0.05, seed=None):
        self.minval = minval
        self.maxval = maxval
        self.seed = seed

    def __call__(self, shape, dtype=tl.float32):
        return tl.random_uniform(shape, self.minval, self.maxval, dtype=dtype, seed=self.seed)

    def get_config(self):
        return {"minval": self.minval, "maxval": self.maxval, "seed": self.seed}


class RandomNormal(Initializer):
    """Initializer that generates tensors with a normal distribution.

    Parameters
    ----------
    mean : A python scalar or a scalar tensor.
        Mean of the random values to generate.
    stddev : A python scalar or a scalar tensor.
        Standard deviation of the random values to generate.
    seed : A Python integer.
        Used to seed the random generator.

    """

    def __init__(self, mean=0.0, stddev=0.05, seed=None):
        self.mean = mean
        self.stddev = stddev
        self.seed = seed

    def __call__(self, shape, dtype=tl.float32):
        return tl.random_normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed)

    def get_config(self):
        return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed}


class TruncatedNormal(Initializer):
    """Initializer that generates a truncated normal distribution.

    These values are similar to values from a `RandomNormal`
    except that values more than two standard deviations from the mean
    are discarded and re-drawn. This is the recommended initializer for
    neural network weights and filters.

    Parameters
    ----------
    mean : A python scalar or a scalar tensor.
        Mean of the random values to generate.
    stddev : A python scalar or a scalar tensor.
        Standard deviation of the random values to generate.
    seed : A Python integer.
        Used to seed the random generator.

    """

    def __init__(self, mean=0.0, stddev=0.05, seed=None):
        self.mean = mean
        self.stddev = stddev
        self.seed = seed

    def __call__(self, shape, dtype=tl.float32):
        return tl.truncated_normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed)

    def get_config(self):
        return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed}


class HeNormal(Initializer):
    """He normal initializer.

    Parameters
    ----------
    seed : A Python integer.
        Used to seed the random generator.

    """

    def __init__(self, seed=None):
        self.seed = seed

    def __call__(self, shape, dtype=tl.float32):
        return tl.he_normal(seed=self.seed, shape=shape, dtype=dtype)

    def get_config(self):
        # Fixed: previously returned the SET {"seed", self.seed}, which broke the
        # get_config()/from_config() round-trip every other initializer supports.
        return {"seed": self.seed}


def deconv2d_bilinear_upsampling_initializer(shape):
    """Returns the initializer that can be passed to DeConv2dLayer for initializing the
    weights in correspondence to channel-wise bilinear up-sampling.
    Used in segmentation approaches such as [FCN](https://arxiv.org/abs/1605.06211)

    Parameters
    ----------
    shape : tuple of int
        The shape of the filters, [height, width, output_channels, in_channels].
        It must match the shape passed to DeConv2dLayer.

    Returns
    -------
    ``Constant``
        A constant initializer with weights set to correspond to per channel bilinear upsampling
        when passed as W_int in DeConv2dLayer

    """
    if shape[0] != shape[1]:
        raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes')

    if shape[3] < shape[2]:
        raise Exception(
            'deconv2d_bilinear_upsampling_initializer behaviour is not defined for num_in_channels < num_out_channels '
        )

    filter_size = shape[0]
    num_out_channels = shape[2]
    num_in_channels = shape[3]

    # Create bilinear filter kernel as numpy array
    bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32)
    scale_factor = (filter_size + 1) // 2
    if filter_size % 2 == 1:
        center = scale_factor - 1
    else:
        center = scale_factor - 0.5
    for x in range(filter_size):
        for y in range(filter_size):
            bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * (1 - abs(y - center) / scale_factor)
    weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels), dtype=np.float32)
    # Place the bilinear kernel on the diagonal so each output channel upsamples
    # its matching input channel independently.
    for i in range(num_out_channels):
        weights[:, :, i, i] = bilinear_kernel

    # Assign numpy array to a constant initializer and pass to get_variable
    return Constant(value=weights)
class Initializer(object):
    """Base class for all initializers.

    Subclasses implement ``__call__`` to produce an initialized tensor and may
    override ``get_config`` so an equivalent instance can later be rebuilt via
    ``from_config``.
    """

    def __call__(self, shape, dtype=None):
        """Return a tensor of ``shape`` initialized as specified by the initializer.

        Parameters
        ----------
        shape : tuple of int.
            The shape of the tensor.
        dtype : Optional dtype of the tensor.
            If not provided will return tensor of `tl.float32`.

        Returns
        -------

        """
        raise NotImplementedError

    def get_config(self):
        """Return the initializer's configuration as a JSON-serializable dict.

        Returns
        -------
        A JSON-serializable Python dict.
        """
        return {}

    @classmethod
    def from_config(cls, config):
        """Build an initializer instance from a ``get_config``-style dictionary.

        Parameters
        ----------
        config : A python dictionary.
            It will typically be the output of `get_config`.

        Returns
        -------
        An Initializer instance.
        """
        # `dtype` may be recorded in a config but is never an __init__ argument.
        config.pop('dtype', None)
        return cls(**config)
""" @@ -72,8 +119,7 @@ class RandomUniform(UniformInitializer): assert minval is not None, 'low should not be None' assert maxval is not None, 'high should not be None' assert maxval >= minval, 'high should greater or equal than low' - super(RandomUniform, self).__init__( - low=minval, high=maxval, seed=seed, diag_num=0, diag_step=0, diag_val=1.0) + super(RandomUniform, self).__init__(low=minval, high=maxval, seed=seed, diag_num=0, diag_step=0, diag_val=1.0) self.minval = minval self.maxval = maxval self.seed = seed @@ -149,8 +195,7 @@ class HeNormal(MSRAInitializer): """ def __init__(self, seed=0): - super(HeNormal, self).__init__( - uniform=False, fan_in=None, seed=seed) + super(HeNormal, self).__init__(uniform=False, fan_in=None, seed=seed) self.seed = seed def get_config(self): diff --git a/tensorlayer/initializers/tensorflow_initializers.py b/tensorlayer/initializers/tensorflow_initializers.py index 8865216..5009969 100644 --- a/tensorlayer/initializers/tensorflow_initializers.py +++ b/tensorlayer/initializers/tensorflow_initializers.py @@ -59,6 +59,14 @@ class Initializer(object): class Zeros(Initializer): """Initializer that generates tensors initialized to 0. + + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.zeros() + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __call__(self, shape, dtype=tl.float32): @@ -67,6 +75,14 @@ class Zeros(Initializer): class Ones(Initializer): """Initializer that generates tensors initialized to 1. + + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.ones() + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __call__(self, shape, dtype=tl.float32): @@ -81,6 +97,13 @@ class Constant(Initializer): value : A python scalar or a numpy array. The assigned value. 
+ Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.constant(value=10) + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __init__(self, value=0): @@ -105,6 +128,13 @@ class RandomUniform(Initializer): seed : A Python integer. Used to seed the random generator. + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.random_uniform(minval=-0.05, maxval=0.05) + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __init__(self, minval=-0.05, maxval=0.05, seed=None): @@ -130,6 +160,16 @@ class RandomNormal(Initializer): Standard deviation of the random values to generate. seed : A Python integer. Used to seed the random generator. + + minval=-0.05, maxval=0.05 + + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.random_normal(mean=0.0, stddev=0.05) + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __init__(self, mean=0.0, stddev=0.05, seed=None): @@ -161,6 +201,14 @@ class TruncatedNormal(Initializer): Standard deviation of the andom values to generate. seed : A Python integer. Used to seed the random generator. + + Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.truncated_normal(mean=0.0, stddev=0.05) + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __init__(self, mean=0.0, stddev=0.05, seed=None): @@ -183,6 +231,13 @@ class HeNormal(Initializer): seed : A Python integer. Used to seed the random generator. 
+ Examples + -------- + + >>> import tensorlayer as tl + >>> init = tl.initializers.he_normal() + >>> print(init(shape=(5, 10), dtype=tl.float32)) + """ def __init__(self, seed=None): diff --git a/tensorlayer/layers/__init__.py b/tensorlayer/layers/__init__.py index 309d586..d670243 100644 --- a/tensorlayer/layers/__init__.py +++ b/tensorlayer/layers/__init__.py @@ -18,7 +18,7 @@ from .normalization import * from .padding import * from .pooling import * from .quantize import * -# from .recurrent import * +from .recurrent import * from .scale import * from .shape import * from .spatial_transformer import * diff --git a/tensorlayer/layers/activation.py b/tensorlayer/layers/activation.py index c5a0de3..f429847 100644 --- a/tensorlayer/layers/activation.py +++ b/tensorlayer/layers/activation.py @@ -7,16 +7,7 @@ from tensorlayer.initializers import truncated_normal from tensorlayer.layers.core import Module __all__ = [ - 'PRelu', - 'PRelu6', - 'PTRelu6', - 'LeakyReLU', - 'LeakyReLU6', - 'LeakyTwiceRelu6', - 'Ramp', - 'Swish', - 'HardTanh', - 'Mish' + 'PRelu', 'PRelu6', 'PTRelu6', 'LeakyReLU', 'LeakyReLU6', 'LeakyTwiceRelu6', 'Ramp', 'Swish', 'HardTanh', 'Mish' ] @@ -41,7 +32,7 @@ class PRelu(Module): Examples ----------- >>> inputs = tl.layers.Input([10, 5]) - >>> prelulayer = tl.layers.PRelu(channel_shared=True) + >>> prelulayer = tl.layers.PRelu(channel_shared=True, in_channels=5)(inputs) References ----------- @@ -141,6 +132,11 @@ class PRelu6(Module): name : None or str A unique layer name. + Examples + ----------- + >>> inputs = tl.layers.Input([10, 5]) + >>> prelulayer = tl.layers.PRelu6(channel_shared=True, in_channels=5)(inputs) + References ----------- - `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `__ @@ -249,6 +245,11 @@ class PTRelu6(Module): name : None or str A unique layer name. 
+ Examples + ----------- + >>> inputs = tl.layers.Input([10, 5]) + >>> prelulayer = tl.layers.PTRelu6(channel_shared=True, in_channels=5)(inputs) + References ----------- - `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `__ @@ -347,6 +348,11 @@ class Ramp(Module): Tensor A ``Tensor`` in the same type as ``x``. + Examples + ----------- + >>> inputs = tl.layers.Input([10, 5]) + >>> prelulayer = tl.layers.Ramp()(inputs) + """ def __init__(self, v_min=0, v_max=1): @@ -380,7 +386,6 @@ class LeakyReLU(Module): Examples -------- - >>> import tensorlayer as tl >>> net = tl.layers.Input([10, 200]) >>> net = tl.layers.LeakyReLU(alpha=0.5)(net) @@ -429,7 +434,6 @@ class LeakyReLU6(Module): Examples -------- - >>> import tensorlayer as tl >>> net = tl.layers.Input([10, 200]) >>> net = tl.layers.LeakyReLU6(alpha=0.5)(net) @@ -487,7 +491,6 @@ class LeakyTwiceRelu6(Module): Examples -------- - >>> import tensorlayer as tl >>> net = tl.layers.Input([10, 200]) >>> net = tl.layers.LeakyTwiceRelu6(alpha_low=0.5, alpha_high=0.2)(net) @@ -535,6 +538,11 @@ class Swish(Module): name: str function name (optional). + Examples + -------- + >>> net = tl.layers.Input([10, 200]) + >>> net = tl.layers.Swish()(net) + Returns ------- Tensor @@ -563,6 +571,11 @@ class HardTanh(Module): name : str The function name (optional). + Examples + -------- + >>> net = tl.layers.Input([10, 200]) + >>> net = tl.layers.HardTanh()(net) + Returns ------- Tensor @@ -588,6 +601,11 @@ class Mish(Module): x : Tensor input. 
+ Examples + -------- + >>> net = tl.layers.Input([10, 200]) + >>> net = tl.layers.Mish()(net) + Returns ------- Tensor diff --git a/tensorlayer/layers/convolution/binary_conv.py b/tensorlayer/layers/convolution/binary_conv.py index e5ab6c5..f949a48 100644 --- a/tensorlayer/layers/convolution/binary_conv.py +++ b/tensorlayer/layers/convolution/binary_conv.py @@ -4,7 +4,6 @@ import tensorlayer as tl from tensorlayer import logging from tensorlayer.layers.core import Module -from tensorlayer.backend import BACKEND __all__ = [ 'BinaryConv2d', @@ -49,8 +48,7 @@ class BinaryConv2d(Module): >>> net = tl.layers.Input([8, 100, 100, 32], name='input') >>> binaryconv2d = tl.layers.BinaryConv2d( - ... n_filter=64, filter_size=(3, 3), strides=(2, 2), act=tl.relu, in_channels=32, name='binaryconv2d' - ... )(net) + ... n_filter=64, filter_size=(3, 3), strides=(2, 2), act=tl.ReLU, in_channels=32, name='binaryconv2d')(net) >>> print(binaryconv2d) >>> output shape : (8, 50, 50, 64) diff --git a/tensorlayer/layers/convolution/deformable_conv.py b/tensorlayer/layers/convolution/deformable_conv.py index 8a2cba0..9de896c 100644 --- a/tensorlayer/layers/convolution/deformable_conv.py +++ b/tensorlayer/layers/convolution/deformable_conv.py @@ -16,7 +16,7 @@ class DeformableConv2d(Module): Parameters ---------- - offset_layer : tf.Tensor + offset_layer : tl.Tensor To predict the offset of convolution operations. The shape is (batchsize, input height, input width, 2*(number of element in the convolution kernel)) e.g. if apply a 3*3 kernel, the number of the last dimension should be 18 (2*3*3) @@ -40,7 +40,6 @@ class DeformableConv2d(Module): Examples -------- With TensorLayer - >>> net = tl.layers.Input([5, 10, 10, 16], name='input') >>> offset1 = tl.layers.Conv2d( ... 
n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset1' @@ -58,7 +57,6 @@ class DeformableConv2d(Module): References ---------- - The deformation operation was adapted from the implementation in `here `__ - Notes ----- - The padding is fixed to 'SAME'. @@ -66,9 +64,11 @@ class DeformableConv2d(Module): """ + # @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, offset_layer=None, + # shape=(3, 3, 1, 100), n_filter=32, filter_size=(3, 3), act=None, @@ -76,7 +76,7 @@ class DeformableConv2d(Module): W_init=tl.initializers.truncated_normal(stddev=0.02), b_init=tl.initializers.constant(value=0.0), in_channels=None, - name=None + name=None # 'deformable_conv_2d', ): super().__init__(name, act=act) @@ -88,17 +88,10 @@ class DeformableConv2d(Module): self.b_init = b_init self.in_channels = in_channels - # layer forward state - self._forward_state = False - self.kernel_n = filter_size[0] * filter_size[1] if self.offset_layer.get_shape()[-1] != 2 * self.kernel_n: raise AssertionError("offset.get_shape()[-1] is not equal to: %d" % 2 * self.kernel_n) - if self.in_channels is not None: - self.build(None) - self._built = True - logging.info( "DeformableConv2d %s: n_filter: %d, filter_size: %s act: %s" % ( self.name, self.n_filter, str(self.filter_size @@ -106,7 +99,6 @@ class DeformableConv2d(Module): ) ) - def __repr__(self): actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ( @@ -122,13 +114,14 @@ class DeformableConv2d(Module): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape): - if self.in_channels is None: - self.in_channels = inputs_shape[-1] + + self.in_channels = inputs_shape[-1] self.input_h = int(inputs_shape[1]) self.input_w = int(inputs_shape[2]) - initial_offsets = tl.ops.stack(tl.ops.meshgrid(tl.ops.range(self.filter_size[0]), - tl.ops.range(self.filter_size[1]), indexing='ij')) # 
initial_offsets --> (kh, kw, 2) + initial_offsets = tl.ops.stack( + tl.ops.meshgrid(tl.ops.range(self.filter_size[0]), tl.ops.range(self.filter_size[1]), indexing='ij') + ) # initial_offsets --> (kh, kw, 2) initial_offsets = tl.ops.reshape(initial_offsets, (-1, 2)) # initial_offsets --> (n, 2) initial_offsets = tl.ops.expand_dims(initial_offsets, 0) # initial_offsets --> (1, n, 2) initial_offsets = tl.ops.expand_dims(initial_offsets, 0) # initial_offsets --> (1, 1, n, 2) @@ -168,12 +161,15 @@ class DeformableConv2d(Module): self._built = True self._forward_state = True + # shape = (filter_size[0], filter_size[1], pre_channel, n_filter) offset = self.offset_layer grid_offset = self.grid_offset input_deform = self._tf_batch_map_offsets(inputs, offset, grid_offset) outputs = self.conv3d(input=input_deform, filters=self.W) - outputs = tl.ops.reshape(tensor=outputs, shape=[outputs.get_shape()[0], self.input_h, self.input_w, self.n_filter]) + outputs = tl.ops.reshape( + tensor=outputs, shape=[outputs.get_shape()[0], self.input_h, self.input_w, self.n_filter] + ) if self.b_init: outputs = self.bias_add(outputs, self.b) if self.act: @@ -219,21 +215,17 @@ class DeformableConv2d(Module): def _tf_batch_map_coordinates(self, inputs, coords): """Batch version of tf_map_coordinates - Only supports 2D feature maps - Parameters ---------- - inputs : ``tf.Tensor`` + inputs : ``tl.Tensor`` shape = (b*c, h, w) - coords : ``tf.Tensor`` + coords : ``tl.Tensor`` shape = (b*c, h, w, n, 2) - Returns ------- - ``tf.Tensor`` + ``tl.Tensor`` A Tensor with the shape as (b*c, h, w, n) - """ inputs_shape = inputs.get_shape() coords_shape = coords.get_shape() @@ -243,8 +235,8 @@ class DeformableConv2d(Module): kernel_n = int(coords_shape[3]) n_coords = input_h * input_w * kernel_n - coords_lt = tl.ops.cast(tl.ops.floor(coords), 'int32') - coords_rb = tl.ops.cast(tl.ops.ceil(coords), 'int32') + coords_lt = tl.ops.cast(tl.ops.Floor()(coords), 'int32') + coords_rb = 
tl.ops.cast(tl.ops.Ceil()(coords), 'int32') coords_lb = tl.ops.stack([coords_lt[:, :, :, :, 0], coords_rb[:, :, :, :, 1]], axis=-1) coords_rt = tl.ops.stack([coords_rb[:, :, :, :, 0], coords_lt[:, :, :, :, 1]], axis=-1) @@ -265,21 +257,18 @@ class DeformableConv2d(Module): def _tf_batch_map_offsets(self, inputs, offsets, grid_offset): """Batch map offsets into input - Parameters ------------ - inputs : ``tf.Tensor`` + inputs : ``tl.Tensor`` shape = (b, h, w, c) - offsets: ``tf.Tensor`` + offsets: ``tl.Tensor`` shape = (b, h, w, 2*n) - grid_offset: `tf.Tensor`` + grid_offset: `tl.Tensor`` Offset grids shape = (h, w, n, 2) - Returns ------- - ``tf.Tensor`` + ``tl.Tensor`` A Tensor with the shape as (b, h, w, c) - """ inputs_shape = inputs.get_shape() batch_size = tl.get_tensor_shape(inputs)[0] @@ -293,8 +282,6 @@ class DeformableConv2d(Module): # offsets (b, h, w, 2*n) --> (b, h, w, n, 2) offsets = tl.ops.reshape(offsets, (batch_size, input_h, input_w, kernel_n, 2)) - # offsets (b, h, w, n, 2) --> (b*c, h, w, n, 2) - # offsets = tf.tile(offsets, [channel, 1, 1, 1, 1]) coords = tl.ops.expand_dims(grid_offset, 0) # grid_offset --> (1, h, w, n, 2) coords = tl.ops.tile(coords, [batch_size, 1, 1, 1, 1]) + offsets # grid_offset --> (b, h, w, n, 2) @@ -313,12 +300,3 @@ class DeformableConv2d(Module): mapped_vals = self._to_b_h_w_n_c(mapped_vals, [batch_size, input_h, input_w, kernel_n, channel]) return mapped_vals - -if __name__ == '__main__': - net = tl.layers.Input([5, 10, 10, 16], name='input') - offset1 = tl.layers.Conv2d(n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset1', in_channels=16)(net) - deformconv1 = DeformableConv2d(offset_layer=offset1, n_filter=32, filter_size=(3, 3), name='deformable1')(net) - offset2 = tl.layers.Conv2d(n_filter=18, filter_size=(3, 3), strides=(1, 1), padding='SAME', name='offset2', in_channels=32)(deformconv1) - deformconv2 = DeformableConv2d(offset_layer=offset2, n_filter=64, filter_size=(3, 3), 
name='deformable2')(deformconv1) - print(deformconv2) - diff --git a/tensorlayer/layers/convolution/depthwise_conv.py b/tensorlayer/layers/convolution/depthwise_conv.py index bac18de..e84e0d0 100644 --- a/tensorlayer/layers/convolution/depthwise_conv.py +++ b/tensorlayer/layers/convolution/depthwise_conv.py @@ -138,7 +138,7 @@ class DepthwiseConv2d(Module): if BACKEND == 'mindspore': self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, 1) - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init, transposed=True) self.depthwise_conv2d = tl.ops.DepthwiseConv2d( strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, diff --git a/tensorlayer/layers/convolution/dorefa_conv.py b/tensorlayer/layers/convolution/dorefa_conv.py index 50396cd..de82b50 100644 --- a/tensorlayer/layers/convolution/dorefa_conv.py +++ b/tensorlayer/layers/convolution/dorefa_conv.py @@ -52,7 +52,7 @@ class DorefaConv2d(Module): >>> net = tl.layers.Input([8, 12, 12, 32], name='input') >>> dorefaconv2d = tl.layers.DorefaConv2d( - ... n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tl.relu, padding='SAME', name='dorefaconv2d' + ... n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tl.ReLU, padding='SAME', name='dorefaconv2d' ... )(net) >>> print(dorefaconv2d) >>> output shape : (8, 12, 12, 32) diff --git a/tensorlayer/layers/convolution/group_conv.py b/tensorlayer/layers/convolution/group_conv.py index cbbbd47..079961e 100644 --- a/tensorlayer/layers/convolution/group_conv.py +++ b/tensorlayer/layers/convolution/group_conv.py @@ -13,6 +13,7 @@ __all__ = [ class GroupConv2d(Module): """The :class:`GroupConv2d` class is 2D grouped convolution, see `here `__. + Parameters -------------- n_filter : int @@ -39,6 +40,7 @@ class GroupConv2d(Module): The number of in channels. name : None or str A unique layer name. 
+ Examples --------- With TensorLayer @@ -48,6 +50,7 @@ class GroupConv2d(Module): ... )(net) >>> print(groupconv2d) >>> output shape : (8, 12, 12, 64) + """ def __init__( diff --git a/tensorlayer/layers/convolution/quan_conv.py b/tensorlayer/layers/convolution/quan_conv.py index 87f4f52..f89c648 100644 --- a/tensorlayer/layers/convolution/quan_conv.py +++ b/tensorlayer/layers/convolution/quan_conv.py @@ -55,7 +55,7 @@ class QuanConv2d(Module): >>> net = tl.layers.Input([8, 12, 12, 64], name='input') >>> quanconv2d = tl.layers.QuanConv2d( - ... n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='quancnn2d' + ... n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tl.ReLU, padding='SAME', name='quancnn2d' ... )(net) >>> print(quanconv2d) >>> output shape : (8, 12, 12, 32) @@ -149,8 +149,9 @@ class QuanConv2d(Module): self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) self.bias_add = tl.ops.BiasAdd(data_format=self.data_format) - self.conv2d = tl.ops.Conv2D(strides=self.strides, padding=self.padding, data_format=self.data_format, - dilations=self._dilation_rate) + self.conv2d = tl.ops.Conv2D( + strides=self.strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate + ) def forward(self, inputs): if self._forward_state == False: diff --git a/tensorlayer/layers/convolution/quan_conv_bn.py b/tensorlayer/layers/convolution/quan_conv_bn.py index 335742b..cec940d 100644 --- a/tensorlayer/layers/convolution/quan_conv_bn.py +++ b/tensorlayer/layers/convolution/quan_conv_bn.py @@ -237,4 +237,4 @@ class QuanConv2dWithBN(Module): return tf.compat.v1.div(tf.multiply(gama, w), tf.sqrt(var + epsilon)) def _bias_fold(self, beta, gama, mean, var, epsilon): - return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon))) \ No newline at end of file + return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon))) diff --git 
a/tensorlayer/layers/convolution/separable_conv.py b/tensorlayer/layers/convolution/separable_conv.py index b837e4e..fe721b6 100644 --- a/tensorlayer/layers/convolution/separable_conv.py +++ b/tensorlayer/layers/convolution/separable_conv.py @@ -15,6 +15,7 @@ __all__ = [ class SeparableConv1d(Module): """The :class:`SeparableConv1d` class is a 1D depthwise separable convolutional layer. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. + Parameters ------------ n_filter : int @@ -43,13 +44,15 @@ class SeparableConv1d(Module): The number of in channels. name : None or str A unique layer name. + Examples -------- With TensorLayer >>> net = tl.layers.Input([8, 50, 64], name='input') - >>> separableconv1d = tl.layers.SeparableConv1d(n_filter=32, filter_size=3, strides=2, padding='SAME', act=tf.nn.relu, name='separable_1d')(net) + >>> separableconv1d = tl.layers.SeparableConv1d(n_filter=32, filter_size=3, strides=2, padding='SAME', act=tl.ReLU, name='separable_1d')(net) >>> print(separableconv1d) >>> output shape : (8, 25, 32) + """ def __init__( @@ -112,10 +115,10 @@ class SeparableConv1d(Module): if BACKEND == 'tensorflow': self.depthwise_filter_shape = (self.filter_size, self.in_channels, self.depth_multiplier) - self.pointwise_filter_shape = (1, self.depth_multiplier * self.in_channels, self.n_filter) elif BACKEND == 'mindspore': self.depthwise_filter_shape = (self.filter_size, 1, self.depth_multiplier * self.in_channels) - self.pointwise_filter_shape = (1, self.depth_multiplier * self.in_channels, self.n_filter) + + self.pointwise_filter_shape = (1, self.depth_multiplier * self.in_channels, self.n_filter) self.depthwise_W = self._get_weights( 'depthwise_filters', shape=self.depthwise_filter_shape, init=self.depthwise_init @@ -159,6 +162,7 @@ class SeparableConv1d(Module): class SeparableConv2d(Module): """The :class:`SeparableConv2d` class is a 2D depthwise separable convolutional 
layer. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. + Parameters ------------ n_filter : int @@ -187,13 +191,15 @@ class SeparableConv2d(Module): The number of in channels. name : None or str A unique layer name. + Examples -------- With TensorLayer >>> net = tl.layers.Input([8, 50, 50, 64], name='input') - >>> separableconv2d = tl.layers.SeparableConv2d(n_filter=32, filter_size=3, strides=2, depth_multiplier = 3 , padding='SAME', act=tf.nn.relu, name='separable_2d')(net) + >>> separableconv2d = tl.layers.SeparableConv2d(n_filter=32, filter_size=3, strides=2, depth_multiplier = 3 , padding='SAME', act=tl.ReLU, name='separable_2d')(net) >>> print(separableconv2d) >>> output shape : (8, 24, 24, 32) + """ def __init__( @@ -307,13 +313,3 @@ class SeparableConv2d(Module): if self.act_init_flag: outputs = self.act(outputs) return outputs - - -if __name__ == '__main__': - net = tl.layers.Input([5, 400, 400, 3], name='input') - layer = SeparableConv2d( - in_channels=3, filter_size=(3, 3), strides=(2, 2), dilation_rate=(2, 2), act=tl.ReLU, depth_multiplier=3, - name='separableconv2d1' - ) - print(len(layer.all_weights)) - print(layer(net).shape) diff --git a/tensorlayer/layers/convolution/simplified_conv.py b/tensorlayer/layers/convolution/simplified_conv.py index e78dfbd..c28566c 100644 --- a/tensorlayer/layers/convolution/simplified_conv.py +++ b/tensorlayer/layers/convolution/simplified_conv.py @@ -5,7 +5,6 @@ from tensorlayer.layers.core import Module import tensorlayer as tl from tensorlayer import logging - __all__ = [ 'Conv1d', 'Conv2d', @@ -51,7 +50,7 @@ class Conv1d(Module): >>> net = tl.layers.Input([8, 100, 1], name='input') >>> conv1d = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, b_init=None, in_channels=1, name='conv1d_1') >>> print(conv1d) - >>> tensor = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, act=tl.ops.relu, name='conv1d_2')(net) + >>> tensor = 
tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, act=tl.ReLU, name='conv1d_2')(net) >>> print(tensor) """ @@ -189,10 +188,10 @@ class Conv2d(Module): -------- With TensorLayer - >>> net = tl.layers.Input([8, 3, 400, 400], name='input') + >>> net = tl.layers.Input([8, 400, 400, 3], name='input') >>> conv2d = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), b_init=None, in_channels=3, name='conv2d_1') >>> print(conv2d) - >>> tensor = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ops.relu, name='conv2d_2')(net) + >>> tensor = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ReLU, name='conv2d_2')(net) >>> print(tensor) """ @@ -337,7 +336,7 @@ RuntimeError: Unable to cast from non-held to held instance (T& to Holder) of >>> net = tl.layers.Input([8, 20, 20, 20, 3], name='input') >>> conv3d = tl.layers.Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), b_init=None, in_channels=3, name='conv3d_1') >>> print(conv3d) - >>> tensor = tl.layers.Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tl.ops.relu, name='conv3d_2')(net) + >>> tensor = tl.layers.Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tl.ReLU, name='conv3d_2')(net) >>> print(tensor) """ @@ -416,9 +415,6 @@ RuntimeError: Unable to cast from non-held to held instance (T& to Holder) of self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) - if self.b_init: - self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) - self.b_init_flag = False if self.b_init: self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) @@ -427,7 +423,7 @@ RuntimeError: Unable to cast from non-held to held instance (T& to Holder) of self.conv3d = tl.ops.Conv3D( strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, - out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1]) + 
out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1], self.filter_size[2]) ) self.act_init_flag = False @@ -486,7 +482,7 @@ class DeConv1d(Module): >>> net = tl.layers.Input([8, 100, 1], name='input') >>> conv1d = tl.layers.DeConv1d(n_filter=32, filter_size=5, stride=2, b_init=None, in_channels=1, name='Deonv1d_1') >>> print(conv1d) - >>> tensor = tl.layers.DeConv1d(n_filter=32, filter_size=5, stride=2, act=tl.ops.relu, name='Deconv1d_2')(net) + >>> tensor = tl.layers.DeConv1d(n_filter=32, filter_size=5, stride=2, act=tl.ReLU, name='Deconv1d_2')(net) >>> print(tensor) """ @@ -495,7 +491,7 @@ class DeConv1d(Module): self, n_filter=32, filter_size=15, - strides=1, + stride=1, act=None, padding='SAME', data_format="channels_last", @@ -508,7 +504,7 @@ class DeConv1d(Module): super(DeConv1d, self).__init__(name, act=act) self.n_filter = n_filter self.filter_size = filter_size - self.strides = strides + self.stride = stride self.padding = padding self.data_format = data_format self.dilation_rate = dilation_rate @@ -522,7 +518,7 @@ class DeConv1d(Module): logging.info( "DeConv1d %s: n_filter: %d filter_size: %s stride: %d pad: %s act: %s" % ( - self.name, n_filter, filter_size, strides, padding, + self.name, n_filter, filter_size, stride, padding, self.act.__class__.__name__ if self.act is not None else 'No Activation' ) ) @@ -531,7 +527,7 @@ class DeConv1d(Module): actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ( '{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}' - ', strides={strides}, padding={padding}' + ', stride={stride}, padding={padding}' ) if self.dilation_rate != 1: s += ', dilation={dilation_rate}' @@ -567,7 +563,7 @@ class DeConv1d(Module): self.b_init_flag = True self.conv1d_transpose = tl.ops.Conv1d_transpose( - strides=self.strides, + stride=self.stride, padding=self.padding, data_format=self.data_format, dilations=self.dilation_rate, @@ -631,10 +627,10 @@ 
class DeConv2d(Module): -------- With TensorLayer - >>> net = tl.layers.Input([8, 3, 400, 400], name='input') + >>> net = tl.layers.Input([8, 400, 400, 3], name='input') >>> conv2d_transpose = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), b_init=None, in_channels=3, name='conv2d_transpose_1') >>> print(conv2d_transpose) - >>> tensor = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ops.relu, name='conv2d_transpose_2')(net) + >>> tensor = tl.layers.DeConv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tl.ReLU, name='conv2d_transpose_2')(net) >>> print(tensor) """ @@ -656,10 +652,10 @@ class DeConv2d(Module): super(DeConv2d, self).__init__(name, act=act) self.n_filter = n_filter self.filter_size = filter_size - self._strides = self.strides = strides + self.strides = strides self.padding = padding self.data_format = data_format - self._dilation_rate = self.dilation_rate = dilation_rate + self.dilation_rate = dilation_rate self.W_init = W_init self.b_init = b_init self.in_channels = in_channels @@ -696,20 +692,16 @@ class DeConv2d(Module): self.data_format = 'NHWC' if self.in_channels is None: self.in_channels = inputs_shape[-1] - self._strides = [1, self._strides[0], self._strides[1], 1] - self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1] elif self.data_format == 'channels_first': self.data_format = 'NCHW' if self.in_channels is None: self.in_channels = inputs_shape[1] - self._strides = [1, 1, self._strides[0], self._strides[1]] - self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]] else: raise Exception("data_format should be either channels_last or channels_first") #TODO channels first filter shape [out_channel, in_channel, filter_h, filter_w] self.filter_shape = (self.filter_size[0], self.filter_size[1], self.n_filter, self.in_channels) - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + self.W = self._get_weights("filters", 
shape=self.filter_shape, init=self.W_init, transposed=True) self.b_init_flag = False if self.b_init: @@ -718,7 +710,7 @@ class DeConv2d(Module): self.b_init_flag = True self.conv2d_transpose = tl.ops.Conv2d_transpose( - strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, + strides=self.strides, padding=self.padding, data_format=self.data_format, dilations=self.dilation_rate, out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1]), in_channels=self.in_channels ) @@ -781,7 +773,7 @@ class DeConv3d(Module): >>> net = tl.layers.Input([8, 20, 20, 20, 3], name='input') >>> deconv3d = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), b_init=None, in_channels=3, name='deconv3d_1') >>> print(deconv3d) - >>> tensor = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tl.ops.relu, name='deconv3d_2')(net) + >>> tensor = tl.layers.DeConv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), act=tl.ReLU, name='deconv3d_2')(net) >>> print(tensor) """ @@ -803,10 +795,10 @@ class DeConv3d(Module): super(DeConv3d, self).__init__(name, act=act) self.n_filter = n_filter self.filter_size = filter_size - self._strides = self.strides = strides + self.strides = strides self.padding = padding self.data_format = data_format - self._dilation_rate = self.dilation_rate = dilation_rate + self.dilation_rate = dilation_rate self.W_init = W_init self.b_init = b_init self.in_channels = in_channels @@ -843,14 +835,10 @@ class DeConv3d(Module): self.data_format = 'NDHWC' if self.in_channels is None: self.in_channels = inputs_shape[-1] - self._strides = [1, self._strides[0], self._strides[1], self._strides[2], 1] - self._dilation_rate = [1, self.dilation_rate[0], self.dilation_rate[1], self.dilation_rate[2], 1] elif self.data_format == 'channels_first': self.data_format = 'NCDHW' if self.in_channels is None: self.in_channels = inputs_shape[1] - self._strides = [1, 1, 
self._strides[0], self._strides[1], self._strides[2]] - self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1], self._dilation_rate[2]] else: raise Exception("data_format should be either channels_last or channels_first") @@ -858,7 +846,7 @@ class DeConv3d(Module): self.filter_size[0], self.filter_size[1], self.filter_size[2], self.n_filter, self.in_channels ) - self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init) + self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init, transposed=True) if self.b_init: self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) @@ -870,8 +858,9 @@ class DeConv3d(Module): self.b_init_flag = True self.conv3d_transpose = tl.ops.Conv3d_transpose( - strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate, - out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1], self.filter_size[2]) + strides=self.strides, padding=self.padding, data_format=self.data_format, dilations=self.dilation_rate, + out_channel=self.n_filter, k_size=(self.filter_size[0], self.filter_size[1], self.filter_size[2]), + in_channels=self.in_channels ) self.act_init_flag = False diff --git a/tensorlayer/layers/convolution/super_resolution.py b/tensorlayer/layers/convolution/super_resolution.py index 102ef52..0b9339f 100644 --- a/tensorlayer/layers/convolution/super_resolution.py +++ b/tensorlayer/layers/convolution/super_resolution.py @@ -61,11 +61,11 @@ class SubpixelConv1d(Module): logging.info( "SubpixelConv1d %s: scale: %d act: %s" % - (self.name, scale, self.act.__name__ if self.act is not None else 'No Activation') + (self.name, scale, self.act.__class__.__name__ if self.act is not None else 'No Activation') ) def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = 
('{classname}(in_channels={in_channels}, out_channels={out_channels}') s += (', ' + actstr) if self.name is not None: @@ -163,11 +163,11 @@ class SubpixelConv2d(Module): self._built = True logging.info( "SubpixelConv2d %s: scale: %d act: %s" % - (self.name, scale, self.act.__name__ if self.act is not None else 'No Activation') + (self.name, scale, self.act.__class__.__name__ if self.act is not None else 'No Activation') ) def __repr__(self): - actstr = self.act.__name__ if self.act is not None else 'No Activation' + actstr = self.act.__class__.__name__ if self.act is not None else 'No Activation' s = ('{classname}(in_channels={in_channels}, out_channels={n_out_channels}') s += (', ' + actstr) if self.name is not None: diff --git a/tensorlayer/layers/convolution/ternary_conv.py b/tensorlayer/layers/convolution/ternary_conv.py index 5b60ae0..74e96ec 100644 --- a/tensorlayer/layers/convolution/ternary_conv.py +++ b/tensorlayer/layers/convolution/ternary_conv.py @@ -50,7 +50,7 @@ class TernaryConv2d(Module): >>> net = tl.layers.Input([8, 12, 12, 32], name='input') >>> ternaryconv2d = tl.layers.TernaryConv2d( - ... n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='ternaryconv2d' + ... n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tl.ReLU, padding='SAME', name='ternaryconv2d' ... 
)(net) >>> print(ternaryconv2d) >>> output shape : (8, 12, 12, 64) @@ -140,9 +140,9 @@ class TernaryConv2d(Module): self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init) self.bias_add = tl.ops.BiasAdd(data_format=self.data_format) - self.conv2d = tl.ops.Conv2D(strides=self._strides, padding=self.padding, data_format=self.data_format, - dilations=self._dilation_rate) - + self.conv2d = tl.ops.Conv2D( + strides=self._strides, padding=self.padding, data_format=self.data_format, dilations=self._dilation_rate + ) def forward(self, inputs): if self._forward_state == False: diff --git a/tensorlayer/layers/core/__init__.py b/tensorlayer/layers/core/__init__.py index a3b3f95..d9d9689 100644 --- a/tensorlayer/layers/core/__init__.py +++ b/tensorlayer/layers/core/__init__.py @@ -9,7 +9,5 @@ elif BACKEND == 'tensorflow': from .core_tensorflow import * elif BACKEND == 'paddle': from .core_paddle import * -elif BACKEND == 'dragon': - from .core_dragon import * else: raise ("Unsupported backend:", BACKEND) diff --git a/tensorlayer/layers/core/common.py b/tensorlayer/layers/core/common.py index 1af257f..92214dd 100644 --- a/tensorlayer/layers/core/common.py +++ b/tensorlayer/layers/core/common.py @@ -37,44 +37,46 @@ def str2act(act): raise Exception("Unsupported act: {}".format(act)) return _act_dict[act] -def _save_weights(self, file_path, format=None): + +def _save_weights(net, file_path, format=None): """Input file_path, save model weights into a file of given format. - Use self.load_weights() to restore. - - Parameters - ---------- - file_path : str - Filename to which the model weights will be saved. - format : str or None - Saved file format. - Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. - 1) If this is set to None, then the postfix of file_path will be used to decide saved format. - If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default. 
- 2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of - the hdf5 file. - 3) 'npz' will save model weights sequentially into a npz file. - 4) 'npz_dict' will save model weights along with its name as a dict into a npz file. - 5) 'ckpt' will save model weights into a tensorflow ckpt file. - - Default None. - - Examples - -------- - 1) Save model weights in hdf5 format by default. - >>> net = vgg16() - >>> net.save_weights('./model.h5') - ... - >>> net.load_weights('./model.h5') - - 2) Save model weights in npz/npz_dict format - >>> net = vgg16() - >>> net.save_weights('./model.npz') - >>> net.save_weights('./model.npz', format='npz_dict') - - """ - - # self.all_weights = self.network.all_weights - if self.all_weights is None or len(self.all_weights) == 0: + Use net.load_weights() to restore. + + Parameters + ---------- + file_path : str + Filename to which the model weights will be saved. + format : str or None + Saved file format. + Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. + 1) If this is set to None, then the postfix of file_path will be used to decide saved format. + If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default. + 2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of + the hdf5 file. + 3) 'npz' will save model weights sequentially into a npz file. + 4) 'npz_dict' will save model weights along with its name as a dict into a npz file. + 5) 'ckpt' will save model weights into a tensorflow ckpt file. + + Default None. + + Examples + -------- + 1) Save model weights in hdf5 format by default. + >>> net = vgg16() + >>> optimizer = tl.optimizers.Adam(learning_rate=0.001) + >>> metric = tl.metric.Accuracy() + >>> model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) + >>> model.save_weights('./model.h5') + ... 
+ >>> model.load_weights('./model.h5') + + 2) Save model weights in npz/npz_dict format + >>> model.save_weights('./model.npz') + >>> model.save_weights('./model.npz', format='npz_dict') + + """ + + if net.all_weights is None or len(net.all_weights) == 0: logging.warning("Model contains no weights or layers haven't been built, nothing will be saved") return @@ -86,11 +88,12 @@ def _save_weights(self, file_path, format=None): format = 'hdf5' if format == 'hdf5' or format == 'h5': - utils.save_weights_to_hdf5(file_path, self) + raise NotImplementedError("hdf5 load/save is not supported now.") + # utils.save_weights_to_hdf5(file_path, net) elif format == 'npz': - utils.save_npz(self.all_weights, file_path) + utils.save_npz(net.all_weights, file_path) elif format == 'npz_dict': - utils.save_npz_dict(self.all_weights, file_path) + utils.save_npz_dict(net.all_weights, file_path) elif format == 'ckpt': # TODO: enable this when tf save ckpt is enabled raise NotImplementedError("ckpt load/save is not supported now.") @@ -100,8 +103,9 @@ def _save_weights(self, file_path, format=None): "Other format is not supported now." ) -def _load_weights(self, file_path, format=None, in_order=True, skip=False): - """Load model weights from a given file, which should be previously saved by self.save_weights(). + +def _load_weights(net, file_path, format=None, in_order=True, skip=False): + """Load model weights from a given file, which should be previously saved by net.save_weights(). Parameters ---------- @@ -110,7 +114,7 @@ def _load_weights(self, file_path, format=None, in_order=True, skip=False): format : str or None If not specified (None), the postfix of the file_path will be used to decide its format. If specified, value should be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now. - In addition, it should be the same format when you saved the file using self.save_weights(). 
+ In addition, it should be the same format when you saved the file using net.save_weights(). Default is None. in_order : bool Allow loading weights into model in a sequential way or by name. Only useful when 'format' is 'hdf5'. @@ -122,7 +126,7 @@ def _load_weights(self, file_path, format=None, in_order=True, skip=False): skip : bool Allow skipping weights whose name is mismatched between the file and model. Only useful when 'format' is 'hdf5' or 'npz_dict'. If 'skip' is True, 'in_order' argument will be ignored and those loaded weights - whose name is not found in model weights (self.all_weights) will be skipped. If 'skip' is False, error will + whose name is not found in model weights (net.all_weights) will be skipped. If 'skip' is False, error will occur when mismatch is found. Default is False. @@ -130,14 +134,17 @@ def _load_weights(self, file_path, format=None, in_order=True, skip=False): -------- 1) load model from a hdf5 file. >>> net = vgg16() - >>> net.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch - >>> net.load_weights('./model_eager.h5') # load sequentially + >>> optimizer = tl.optimizers.Adam(learning_rate=0.001) + >>> metric = tl.metric.Accuracy() + >>> model = tl.models.Model(network=net, loss_fn=tl.cost.cross_entropy, optimizer=optimizer, metrics=metric) + >>> model.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch + >>> model.load_weights('./model_eager.h5') # load sequentially 2) load model from a npz file - >>> net.load_weights('./model.npz') + >>> model.load_weights('./model.npz') - 2) load model from a npz file, which is saved as npz_dict previously - >>> net.load_weights('./model.npz', format='npz_dict') + 3) load model from a npz file, which is saved as npz_dict previously + >>> model.load_weights('./model.npz', format='npz_dict') Notes ------- @@ -154,16 +161,17 @@ def _load_weights(self, file_path, format=None, in_order=True, 
skip=False): format = file_path.split('.')[-1] if format == 'hdf5' or format == 'h5': - if skip ==True or in_order == False: - # load by weights name - utils.load_hdf5_to_weights(file_path, self, skip) - else: - # load in order - utils.load_hdf5_to_weights_in_order(file_path, self) + raise NotImplementedError("hdf5 load/save is not supported now.") + # if skip ==True or in_order == False: + # # load by weights name + # utils.load_hdf5_to_weights(file_path, net, skip) + # else: + # # load in order + # utils.load_hdf5_to_weights_in_order(file_path, net) elif format == 'npz': - utils.load_and_assign_npz(file_path, self) + utils.load_and_assign_npz(file_path, net) elif format == 'npz_dict': - utils.load_and_assign_npz_dict(file_path, self, skip) + utils.load_and_assign_npz_dict(file_path, net, skip) elif format == 'ckpt': # TODO: enable this when tf save ckpt is enabled raise NotImplementedError("ckpt load/save is not supported now.") diff --git a/tensorlayer/layers/core/core_dragon.py b/tensorlayer/layers/core/core_dragon.py deleted file mode 100644 index f07772c..0000000 --- a/tensorlayer/layers/core/core_dragon.py +++ /dev/null @@ -1,765 +0,0 @@ -#! 
/usr/bin/python -# -*- coding: utf-8 -*- -#TODO Dragon Module needs a better implementation - -import time -import dragon as dg -import tensorlayer as tl -from tensorlayer.layers.utils import (get_variable_with_initializer) -from .common import str2act, _save_weights, _load_weights -from collections import OrderedDict -from tensorlayer import logging - -__all__ = ['Module', 'SequentialLayer', 'LayerList'] - -_global_layer_name_dict = {} -Parameter_ = dg.Tensor - -class Module(object): - - def __init__(self, name=None, act=None, *args, **kwargs): - self._params = OrderedDict() - self._layers = OrderedDict() - self._params_status = OrderedDict() - self._parameter_layout_dict = {} - self._create_time = int(time.time() * 1e9) - - global _global_layer_name_dict - if name is None: - prefix = self.__class__.__name__.lower() - - if _global_layer_name_dict.get(prefix) is not None: - _global_layer_name_dict[prefix] += 1 - name = prefix + '_' + str(_global_layer_name_dict[prefix]) - else: - _global_layer_name_dict[prefix] = 0 - name = prefix - while True: - if _global_layer_name_dict.get(name) is None: - break - _global_layer_name_dict[prefix] += 1 - name = prefix + '_' + str(_global_layer_name_dict[prefix]) - else: - if _global_layer_name_dict.get(name) is not None: - pass - else: - _global_layer_name_dict[name] = 0 - - self.name = name - - if isinstance(act, str): - str_act = str2act(act) - - if act: - if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): - self.act = str_act - elif isinstance(act, str): - self.act = str_act() - else: - self.act = act() - else: - self.act = act - - # Layer building state - self._built = False - - # Layer nodes state - self._nodes = [] - self._nodes_fixed = False - - # Layer weight state - self._all_weights = [] - self._trainable_weights = [] - self._nontrainable_weights = [] - - # layer forward state - self._forward_state = False - - # Layer training state - self.is_train = True - 
- def extend_repr(self): - """ - Sets the extended representation of the Module. - - To print customized extended information, re-implement this method in your own Layers. - """ - return '' - - def __repr__(self): - extra_str = self.extend_repr() - info_str = self.__class__.__name__ + '<' - if self._layers: - sub_str = '\n' - if extra_str: - sub_str += '{}\n'.format(self.extend_repr()) - for key, value in self._layers.items(): - sub_str += '({}): {}\n'.format(key, repr(value)) - sub_str = sub_str.replace('\n', '\n ') + '>' - info_str += sub_str - else: - info_str += extra_str + '>' - return info_str - - def __setattr__(self, name, value): - layers = self.__dict__.get('_layers') - params = self.__dict__.get('_params') - - if isinstance(value, Parameter_): - if params is None: - raise AttributeError("Can not assign params before Module.__init__() call.") - if name in self.__dict__: - if self.__dict__[name] is not None: - raise TypeError("Expected type is not in (Parameter, Module), but got Parameter.") - del self.__dict__[name] - if layers and name in layers: - raise TypeError("Expected type is Module, but got Parameter.") - self.insert_param_to_layer(name, value) - - elif isinstance(value, Module): - if layers is None: - raise AttributeError("Can not assign layers before Module.__init__() call.") - if name in self.__dict__: - del self.__dict__[name] - if params and name in params: - raise TypeError("Expected type is Parameter, but got Module.") - # TODO How to prompt the user, enter the in_channels. - # TODO Automatic shape inference when the user does not enter inchannels. - # if value._built is False: - # raise AttributeError( - # "The registered layer `{}` should be built in advance. " - # "Do you forget to pass the keyword argument 'in_channels'? 
".format(value.name) - # ) - layers[name] = value - else: - object.__setattr__(self, name, value) - - def __call__(self, inputs, *args, **kwargs): - - output = self.forward(inputs, *args, **kwargs) - - return output - - def forward(self, *inputs, **kwargs): - raise Exception("The forward method must be implemented by inherited class") - - def build(self, inputs_shape): - raise Exception("The build(self, inputs_shape) method must be implemented by inherited class") - - def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True): - """ Get trainable variables. """ - weight = get_variable_with_initializer( - scope_name=self.name, var_name=var_name, shape=shape, init=init, trainable=trainable - ) - self.trainable = trainable - return weight - - def save_weights(self, file_path, format=None): - """Input file_path, save model weights into a file of given format.""" - _save_weights(self, file_path, format) - - def load_weights(self, file_path, format=None, in_order=True, skip=False): - """Load model weights from a given file, which should be previously saved by self.save_weights().""" - _load_weights(self, file_path, format, in_order, skip) - - def _set_mode_for_layers(self, is_train): - """Set all layers of this network to a given mode. - - Parameters - ---------- - is_train : boolean - Network's mode. True means training mode while False means evaluation mode. - - """ - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - if isinstance(layer, Module): - layer.is_train = is_train - - - def set_train(self): - """Set this network in training mode. After calling this method, - all layers in network are in training mode, in particular, BatchNorm, Dropout, etc. - TODO It is not possible to modify the parameter state after initialization, and a better way needs to be found. 
- Examples - -------- - >>> import tensorlayer as tl - >>> net = tl.vgg16() - >>> net.set_train() - - """ - if self.is_train !=True: - self.is_train = True - self._set_mode_for_layers(True) - - def set_eval(self): - """Set this network in evaluation mode. After calling this method, - all layers in network are in evaluation mode, in particular, BatchNorm, Dropout, etc. - TODO It is not possible to modify the parameter state after initialization, and a better way needs to be found. - Examples - -------- - >>> import tensorlayer as tl - >>> net = tl.vgg16() - >>> net.eval() - # do evaluation - - """ - if self.is_train != False: - self.is_train = False - self._set_mode_for_layers(False) - - def test(self): - """Set this network in evaluation mode.""" - self.eval() - - def infer(self): - """Set this network in evaluation mode.""" - self.eval() - - @staticmethod - def _compute_shape(tensors): - if isinstance(tensors, list): - shape_mem = [tl.get_tensor_shape(t) for t in tensors] - else: - shape_mem = tl.get_tensor_shape(tensors) - return shape_mem - - def insert_param_to_layer(self, param_name, param, check_name=True): - """ - Adds a parameter to the current layer. - - Inserts a parameter with given name to the layer. Please refer to the usage in - source code of `tensorlayer.layer.Module.__setattr__`. - - Args: - param_name (str): Name of the parameter. - param (Parameter): Parameter to be inserted to the layer. - check_name (bool): Determines whether the name input is compatible. Default: True. - - Raises: - KeyError: If the name of parameter is null or contains dot. - AttributeError: If user did not call init() first. - TypeError: If the type of parameter is not Parameter_. - """ - if not param_name: - raise KeyError("The name of parameter should not be null.") - if check_name and '.' 
in param_name: - raise KeyError("The name of parameter should not contain \".\"") - if '_params' not in self.__dict__: - raise AttributeError("You need call init() first.") - if hasattr(self, param_name) and param_name not in self._params: - raise KeyError("Duplicated parameter name '{}'.".format(param_name)) - if not isinstance(param, Parameter_) and param is not None: - raise TypeError("The type of parameter should be 'Parameter' if not None.") - self._params[param_name] = param - try: - self._params_status[param_name] = self.trainable - except: - pass - - def _add_node(self, input_tensors, output_tensors): - """Add a LayerNode for this layer given input_tensors, output_tensors. - - WARINING: This function should not be called from outside, it should only be called - in layer.__call__ when building static model. - - Parameters - ---------- - input_tensors : Tensor or a list of tensors - Input tensors to this layer. - output_tensors : Tensor or a list of tensors - Output tensors to this layer. - - """ - raise NotImplementedError - - @property - def create_time(self): - return self._create_time - - def __getattr__(self, name): - if '_params' in self.__dict__: - params = self.__dict__['_params'] - if name in params: - return params[name] - if '_layers' in self.__dict__: - layers = self.__dict__['_layers'] - if name in layers: - return layers[name] - if '_params_status' in self.__dict__: - params_status = self.__dict__['_params_status'] - if name in params_status: - return params_status[name] - raise AttributeError("'{}' object has no attribute '{}'.".format(type(self).__name__, name)) - - def __delattr__(self, name): - if name in self._params: - del self._params[name] - elif name in self._layers: - del self._layers[name] - else: - object.__delattr__(self, name) - - @property - def trainable_weights(self): - """ - Returns all trainable weights. - - Returns a list of all trainable parmeters. 
- - Args: - recurse (bool): Whether contains the trainable weights of sublayers. Default: True. - - Returns: - List, the list of trainable weights. - """ - self.get_weights() - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - params = layer._params.items() - params_status = layer._params_status.items() - params_zip = zip(params, params_status) - for params, params_status in params_zip: - if params_status[1] ==True: - self._trainable_weights.append(params[1]) - return self._trainable_weights - - @property - def nontrainable_weights(self): - """ - Returns all untrainable weights. - - Returns a list of all untrainable weights. - - Args: - recurse (bool): Whether contains the untrainable weights of sublayers. Default: True. - - Returns: - List, the list of untrainable weights. - """ - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - params = layer._params.items() - params_status = layer._params_status.items() - params_zip = zip(params, params_status) - for params, params_status in params_zip: - if params_status[1] == False: - self._nontrainable_weights.append(params[1]) - return self._nontrainable_weights - - @property - def all_weights(self): - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - params = layer._params.items() - for par, val in params: - self._all_weights.append(val) - return self._all_weights - - def get_weights(self, expand=True): - """ - Returns an iterator over layer weights. - - Yields weights of this layer. If `expand` is True, yield parameters of this layer and all sublayers. - - Args: - expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters - that are direct members of this layer. Default: True. 
- - Examples: - >>> net = Net() - >>> for item in net.get_weights(): - >>> print(item) - """ - for _, param in self.parameters_and_names(expand=expand): - yield param - - def check_names(self): - names = set("") - for value, param in self.parameters_and_names(): - if param.name in names: - raise ValueError( - "The value of {} is {}, its name '{}' already exists.".format(value, param, param.name) - ) - names.add(param.name) - - def insert_child_to_layer(self, child_name, child): - """ - Adds a child layer to the current layer. - - Args: - child_name (str): Name of the child layer. - child (Module): The child layer to be inserted. - - Raises: - KeyError: Child Module's name is incorrect or duplicated with the other child name. - TypeError: Child Module's type is incorrect. - """ - if not child_name or '.' in child_name: - raise KeyError("Child layer name is incorrect.") - if hasattr(self, child_name) and child_name not in self._layers: - raise KeyError("Duplicate child name '{}'.".format(child_name)) - if not isinstance(child, Module) and child is not None: - raise TypeError("Child layer type is incorrect.") - self._layers[child_name] = child - - def parameters_and_names(self, name_prefix='', expand=True): - """ - Returns an iterator over layer parameters. - - Includes the parameter's name and itself. - - Args: - name_prefix (str): Namespace. Default: ''. - expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters - that are direct members of this layer. Default: True. 
- - Examples: - >>> n = Net() - >>> names = [] - >>> for m in n.parameters_and_names(): - >>> if m[0]: - >>> names.append(m[0]) - """ - layers = [] - if expand: - layers = self.layers_and_names(name_prefix=name_prefix) - else: - layers.append((name_prefix, self)) - - params_set = set() - for layer_name, layer in layers: - params = layer._params.items() - for par_name, par in params: - if par.inited_param is not None: - par = par.inited_param - if par is not None and id(par) not in params_set: - params_set.add(id(par)) - par_new_name = par_name - if layer_name: - par_new_name = layer_name + '.' + par_new_name - - yield par_new_name, par - - def layers_and_names(self, layers=None, name_prefix=''): - """ - Returns an iterator over all layers in the network. - - Includes the layer's name and itself. - - Args: - layers (str): layers to iterate over. Default: None. - name_prefix (str): Namespace. Default: ''. - - Examples: - >>> n = Net() - >>> names = [] - >>> for m in n.layers_and_names(): - >>> if m[0]: - >>> names.append(m[0]) - """ - t_layers = layers if layers else set() - if self in t_layers: - return - - t_layers.add(self) - yield name_prefix, self - - for name, layer in self._layers.items(): - if layer: - layers_name_prefix = name - if name_prefix: - layers_name_prefix = name_prefix + '.' + layers_name_prefix - for ele in layer.layers_and_names(t_layers, layers_name_prefix): - yield ele - - def layers(self): - """Returns an iterator over immediate layers.""" - return self.name_layers().values() - - def name_layers(self): - """ - Returns an iterator over all layers in the network. - - Include name of the layer and layer itself. - """ - value_set = set() - layers = OrderedDict() - for name, layer in self._layers.items(): - if layer is not None and layer not in value_set: - value_set.add(layer) - layers[name] = layer - return layers - - def init_build(self, *inputs, **kwargs): - """ - (1) This method must be called when the Layer has no input in_channels. 
- (2) Automatic shape inference when the user does not enter inchannels. - """ - - self.forward(*inputs, **kwargs) - - -class SequentialLayer(Module): - """ - Sequential layer container. - - A list of Layers will be added to it in the order they are passed in the constructor. - Alternatively, an ordered dict of layers can also be passed in. - - Args: - args (list, OrderedDict): List of subclass of Module. - - Raises: - TypeError: If the type of the argument is not list or OrderedDict. - - Inputs: - - **input** (Tensor) - Tensor with shape according to the first Module in the sequence. - - Outputs: - Tensor, the output Tensor with shape depending on the input and defined sequence of Layers. - - Examples: - >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') - >>> bn = tl.layers.BatchNorm2d(2) - >>> seq = tl.layers.SequentialLayer([conv, bn]) - >>> - >>> x = tl.layers.Input((1, 3, 4, 4)) - >>> seq(x) - """ - def __init__(self, *args): - super(SequentialLayer, self).__init__() - self._built = True - if len(args) == 1: - layers = args[0] - if isinstance(layers, list): - for index, layer in enumerate(layers): - self.insert_child_to_layer(str(index), layer) - elif isinstance(layers, OrderedDict): - for name, layer in layers.items(): - self.insert_child_to_layer(name, layer) - else: - raise TypeError('Layers must be list or orderedDict') - else: - for index, layer in enumerate(args): - self.insert_child_to_layer(str(index), layer) - self.layer_list = list(self._layers.values()) - - def __getitem__(self, index): - if isinstance(index, slice): - return self.__class__( - OrderedDict(list(self._layers.items())[index])) - index = self._valid_index(len(self), index) - return list(self._layers.values())[index] - - def __setitem__(self, index, layer): - if self._valid_module(layer): - index = self._valid_index(len(self), index) - key = list(self._layers.keys())[index] - self._layers[key] = layer - self.layer_list = list(self._layers.values()) - - def __delitem__(self, index): 
- if isinstance(index, int): - index = self._valid_index(len(self), index) - key = list(self._layers.keys())[index] - del self._layers[key] - elif isinstance(index, slice): - keys = list(self._layers.keys())[index] - for key in keys: - del self._layers[key] - else: - raise TypeError('Index {} is not int type or slice type'.format(index)) - self.layer_list = list(self._layers.values()) - - def __len__(self): - return len(self._layers) - - - def append(self, layer): - if self._valid_module(layer): - self._layers[str(len(self))] = layer - self.layer_list = list(self._layers.values()) - return self - - def build(self, inputs_shape): - pass - - def forward(self, input_data): - for layer in self.layer_list: - input_data = layer(input_data) - return input_data - - def _valid_index(self, layer_num, index): - if not isinstance(index, int): - raise TypeError("Index {} is not int type") - if not -layer_num <= index < layer_num: - raise IndexError("Index should be a number in range [{}, {}), but got {}" - .format(-layer_num, layer_num, index)) - return index % layer_num - - def _valid_module(self, layer): - if issubclass(layer.__class__, Module): - return True - raise TypeError('Module {} is not subclass of Module'.format(layer)) - - -class LayerList(Module): - """ - The class :class:`LayerList` is a linear stack of layers. - - The :class:`LayerList` can be created by passing a list of layer instances. - The given layer instances will be automatically connected one by one. - - Parameters - ---------- - layers: list of Layer - A list of layers. - name : str or None - A unique layer name. If None, a unique name will be automatically assigned. - - Methods - --------- - __init__() - Initializing the LayerList. - weights() - A collection of weights of all the layer instances. - build() - Build the LayerList. The layer instances will be connected automatically one by one. - forward() - Forward the computation. The computation will go through all layer instances. 
- """ - - def __init__(self, layers, name=None): - """ - Initializing the LayerList given a list of Layer. - - :param layers: list of Layer - :param name: str or None - """ - - super(LayerList, self).__init__(name=name) - self.layers = layers - is_built = True - for layer in self.layers: - self._trainable_weights.extend(layer.trainable_weights) - self._nontrainable_weights.extend(layer.nontrainable_weights) - if layer._built is False: - is_built = False - # if layer._built and layer.all_weights is not None: - # # some layers in the list passed in have already been built - # # e.g. using input shape to construct layers in dynamic eager - # if self._all_weights is None: - # self._all_weights = list() - # self._all_weights.extend(layer.all_weights) - if is_built: - self._built = True - - logging.info( - "LayerList %s including layers [%s]" % (self.name, ', '.join([layer.name for layer in self.layers])) - ) - - # check layer name uniqueness in LayerList - local_layer_name_set = set() - for layer in self.layers: - if layer.name not in local_layer_name_set: - local_layer_name_set.add(layer.name) - else: - raise ValueError( - 'Layer name \'%s\' has already been used by another layer. Please change the layer name.' 
% - layer.name - ) - - def __getitem__(self, idx): - if isinstance(idx, slice): - return LayerList(list(self.layers)[idx]) - else: - return self.layers[idx] - - def __len__(self): - return len(self.layers) - - def __repr__(self): - tmpstr = 'LayerList' + '(\n' - for idx, layer in enumerate(self.layers): - modstr = layer.__repr__() - modstr = _addindent(modstr, 2) - tmpstr = tmpstr + ' (' + str(idx) + '): ' + modstr + '\n' - - tmpstr = tmpstr + ')' - return tmpstr - - @property - def trainable_weights(self): - return self._trainable_weights - - @property - def nontrainable_weights(self): - return self._nontrainable_weights - - @property - def all_weights(self): - return self._trainable_weights + self._nontrainable_weights - - # def build(self, inputs_shape): - # """ - # Build the LayerList. The layer instances will be connected automatically one by one. - # """ - # in_tensor = self._input_tensors - # # in_layer = self._input_layer - # for layer in self.layers: - # is_build = layer._built - # out_tensor = layer(in_tensor) - # # nlayer = layer(in_layer) - # if is_build is False and layer.all_weights is not None: - # if self._all_weights is None: - # self._all_weights = list() - # self._all_weights.extend(layer.all_weights) - # layer._built = True - # in_tensor = out_tensor - # # in_layer = nlayer - - def forward(self, inputs): - """ - Forward the computation. The computation will go through all layer instances. 
- """ - z = inputs - for layer in self.layers: - z = layer.forward(z) - return z - - def _set_mode_for_layers(self, is_train): - """Set training/evaluation mode for all layer instances.""" - self.is_train = is_train - for layer in self.layers: - if isinstance(layer, LayerList): - layer._set_mode_for_layers(is_train) - else: - layer.is_train = is_train - - def get_args(self): - init_args = {} - layers = self.layer_args["layers"] - init_args["layers"] = [layer.config for layer in layers] - init_args.update({"layer_type": "layerlist"}) - return init_args - -def tolist(tensors): - if isinstance(tensors, list) or isinstance(tensors, tuple): - ntensors = list() - for t in tensors: - ntensors += tolist(t) - return ntensors - else: - return [tensors] - -def _addindent(s_, numSpaces): - s = s_.split('\n') - # don't do anything for single-line stuff - if len(s) == 1: - return s_ - first = s.pop(0) - s = [(numSpaces * ' ') + line for line in s] - s = '\n'.join(s) - s = first + '\n' + s - return s \ No newline at end of file diff --git a/tensorlayer/layers/core/core_mindspore.py b/tensorlayer/layers/core/core_mindspore.py index b8bfe0d..78a93ef 100644 --- a/tensorlayer/layers/core/core_mindspore.py +++ b/tensorlayer/layers/core/core_mindspore.py @@ -4,10 +4,17 @@ from .common import str2act, _save_weights, _load_weights from mindspore.nn import Cell import tensorlayer as tl -from tensorlayer.layers.utils import (get_variable_with_initializer) from collections import OrderedDict -__all__ = ['Module', 'SequentialLayer', 'LayerList'] +from mindspore import log as logger +import inspect +from mindspore import context +import numpy +import mindspore as ms +from mindspore.common.api import _pynative_exec +from mindspore.common.parameter import Parameter + +__all__ = ['Module', 'SequentialLayer'] _global_layer_name_dict = {} # TODO: better implementation? 
@@ -44,7 +51,8 @@ class Module(Cell): str_act = str2act(act) if act: - if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): + if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or + len(act) > 10 and act[0:10] == "leaky_relu"): self.act = str_act elif isinstance(act, str): self.act = str_act() @@ -68,10 +76,12 @@ class Module(Cell): # Layer training state self.is_train = True - # layer forward state self._forward_state = False + # data_format + self.data_format = "NCHW" + def forward(self, *inputs, **kwargs): raise Exception("The forward method must be implemented by inherited class") @@ -81,13 +91,25 @@ class Module(Cell): def build(self, inputs_shape): raise Exception("The build(self, inputs_shape) method must be implemented by inherited class") - def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True): + def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True, transposed=False): """ Get trainable variables. 
""" - weight = get_variable_with_initializer( - scope_name=self.name, var_name=var_name, shape=shape, init=init, trainable=trainable - ) + var_name = self.name + "/" + var_name + # TODO 2D mindspore weights shape : [out_channel, in_channel, kernel_h, kernel_w] + # TODO 2D mindspore transposed shape [in_channel, out_channel, kernel_h, kernel_w] + if len(shape) == 3: + shape = shape[::-1] + if len(shape) == 4: + if not transposed and self.data_format == 'NHWC': + shape = (shape[3], shape[0], shape[1], shape[2]) + else: + shape = (shape[3], shape[2], shape[0], shape[1]) + if len(shape) == 5: + shape = (shape[4], shape[3], shape[0], shape[1], shape[2]) + + initial_value = init(shape=shape) + var = tl.Variable(initial_value=initial_value, name=var_name, trainable=trainable) self.trainable = trainable - return weight + return var def save_weights(self, file_path, format=None): """Input file_path, save model weights into a file of given format.""" @@ -105,6 +127,59 @@ class Module(Cell): shape_mem = tl.get_tensor_shape(tensors) return shape_mem + def __call__(self, *inputs, **kwargs): + if self.__class__.construct is Cell.construct: + logger.warning( + f"The '{self.__class__}' does not override the method 'construct', " + f"will call the super class(Cell) 'construct'." 
+ ) + if kwargs: + bound_args = inspect.signature(self.construct).bind(*inputs, **kwargs) + inputs = bound_args.args + kwargs = bound_args.kwargs + + if context.get_context("mode") == context.GRAPH_MODE: + raise NotImplementedError("GRAPH MODE is not supported, please select PYNATIVE MODE.") + + # if context.get_context("mode") == context.GRAPH_MODE: + # if kwargs: + # raise ValueError("For 'graph' mode, the outermost network does not support passing " + # "variable key-value pair parameters.") + # if self.enable_hook: + # raise ValueError("The graph mode does not support hook function.") + # out = self.compile_and_run(*inputs) + # return out + + self.do_parameter_broadcast() + for item in inputs: + if isinstance(item, numpy.ndarray): + raise TypeError("cell inputs should not be numpy array.") + origin_grad = [] + if self.requires_grad is True: + _pynative_exec.set_grad_flag(True) + _pynative_exec.new_graph(self, *inputs, **kwargs) + for cell in self.cells(): + origin_grad.append(cell.requires_grad) + cell.set_grad(True) + else: + _pynative_exec.set_grad_flag(False) + cast_inputs = list() + if hasattr(self, "_mindspore_flags"): + if self._mindspore_flags.get('fp16'): + cast_inputs = self._cast_mixed_precision_inputs(inputs, ms.float16) + if self._mindspore_flags.get('fp32'): + cast_inputs = self._cast_mixed_precision_inputs(inputs, ms.float32) + if not cast_inputs: + cast_inputs = inputs + output = self.run_construct(cast_inputs, kwargs) + if isinstance(output, Parameter): + output = output.data + if self.requires_grad is True: + _pynative_exec.end_graph(self, output, *inputs, **kwargs) + for i, cell in enumerate(self.cells()): + cell.set_grad(origin_grad[i]) + return output + def _add_node(self, input_tensors, output_tensors): """Add a LayerNode for this layer given input_tensors, output_tensors. @@ -197,32 +272,36 @@ class Module(Cell): class SequentialLayer(Module): """ - Sequential layer container.
- - A list of Layers will be added to it in the order they are passed in the constructor. - Alternatively, an ordered dict of layers can also be passed in. - - Args: - args (list, OrderedDict): List of subclass of Module. - - Raises: - TypeError: If the type of the argument is not list or OrderedDict. - - Inputs: - - **input** (Tensor) - Tensor with shape according to the first Module in the sequence. + The class :class:`SequentialLayer` is a linear stack of layers. + The :class:`SequentialLayer` can be created by passing a list of layer instances. + The given layer instances will be automatically connected one by one. + Parameters + ---------- + layers: list of Layer + A list of layers. + name : str or None + A unique layer name. If None, a unique name will be automatically assigned. + Methods + --------- + __init__() + Initializing the SequentialLayer. + weights() + A collection of weights of all the layer instances. + build() + Build the SequentialLayer. The layer instances will be connected automatically one by one. + forward() + Forward the computation. The computation will go through all layer instances. - Outputs: - Tensor, the output Tensor with shape depending on the input and defined sequence of Layers.
+ Examples + --------- + >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') + >>> bn = tl.layers.BatchNorm2d(2) + >>> seq = tl.layers.SequentialLayer([conv, bn]) + >>> x = tl.layers.Input((1, 3, 4, 4)) + >>> seq(x) - Examples: - >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') - >>> bn = tl.layers.BatchNorm2d(2) - >>> relu = tl.ReLU() - >>> seq = tl.layers.SequentialLayer([conv, bn, relu]) - >>> - >>> x = tl.layers.Input((1, 3, 4, 4)) - >>> seq(x) """ + def __init__(self, *args): super(SequentialLayer, self).__init__() # self._built = True @@ -243,8 +322,7 @@ class SequentialLayer(Module): def __getitem__(self, index): if isinstance(index, slice): - return self.__class__( - OrderedDict(list(self._layers.items())[index])) + return self.__class__(OrderedDict(list(self._layers.items())[index])) index = self._valid_index(len(self), index) return list(self._layers.values())[index] @@ -294,62 +372,12 @@ class SequentialLayer(Module): if not isinstance(index, int): raise TypeError("Index {} is not int type") if not -layer_num <= index < layer_num: - raise IndexError("Index should be a number in range [{}, {}), but got {}" - .format(-layer_num, layer_num, index)) + raise IndexError( + "Index should be a number in range [{}, {}), but got {}".format(-layer_num, layer_num, index) + ) return index % layer_num def _valid_module(self, layer): if issubclass(layer.__class__, Module): return True raise TypeError('Module {} is not subclass of Module'.format(layer)) - - -class LayerList(Module): - """ - The class :class:`LayerList` is a linear stack of layers. - - The :class:`LayerList` can be created by passing a list of layer instances. - The given layer instances will be automatically connected one by one. - - Parameters - ---------- - layers: list of Layer - A list of layers. - name : str or None - A unique layer name. If None, a unique name will be automatically assigned. - - Methods - --------- - __init__() - Initializing the LayerList. 
- weights() - A collection of weights of all the layer instances. - build() - Build the LayerList. The layer instances will be connected automatically one by one. - forward() - Forward the computation. The computation will go through all layer instances. - """ - - def __init__(self, layers, name=None): - """ - Initializing the LayerList given a list of Layer. - - :param layers: list of Layer - :param name: str or None - """ - - super(LayerList, self).__init__(name=name) - pass - - def __getitem__(self, idx): - pass - - def __len__(self): - return len(self.layers) - - def __repr__(self): - pass - - def forward(self, inputs): - pass - diff --git a/tensorlayer/layers/core/core_paddle.py b/tensorlayer/layers/core/core_paddle.py index 769053f..d4c5d4f 100644 --- a/tensorlayer/layers/core/core_paddle.py +++ b/tensorlayer/layers/core/core_paddle.py @@ -3,11 +3,13 @@ import copy, six from .common import str2act +from .common import _save_weights, _load_weights from paddle.fluid import framework from paddle.fluid.dygraph import Layer from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.dygraph.base import program_desc_tracing_guard, param_guard from paddle.fluid.dygraph import parallel_helper +import paddle as pd _global_layer_name_dict = {} @@ -44,7 +46,8 @@ class Module(Layer): str_act = str2act(act) if act: - if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): + if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or + len(act) > 10 and act[0:10] == "leaky_relu"): self.act = str_act elif isinstance(act, str): self.act = str_act() @@ -176,8 +179,7 @@ class Module(Layer): with program_desc_tracing_guard(False): self._build_once(*inputs, **kwargs) if parallel_helper._is_data_parallel_mode(): - parallel_helper._broadcast_parameters( - self._parameters.values()) + parallel_helper._broadcast_parameters(self._parameters.values()) self._paddle_built = True outputs = self.forward(*inputs, 
**kwargs) @@ -189,36 +191,45 @@ class Module(Layer): return outputs - def _get_weights(self, var_name, shape, init=None, trainable=True): - if var_name in ["filters", "weights"]: - w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=False) - elif var_name in ["biases"]: - w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=True) - else: - w_tmp = self.create_parameter(shape=shape, attr=init) + def _get_weights(self, var_name, shape, init=None, trainable=True, transposed=None): + # TODO 2D mindspore weights shape : [out_channel, in_channel, kernel_h, kernel_w] + # TODO 2D mindspore transposed shape [in_channel, out_channel, kernel_h, kernel_w] + if len(shape) == 3: + shape = shape[::-1] + if len(shape) == 4: + if transposed: + shape = (shape[3], shape[0], shape[1], shape[2]) + else: + shape = (shape[3], shape[2], shape[0], shape[1]) + if len(shape) == 5: + shape = (shape[4], shape[3], shape[0], shape[1], shape[2]) + + # if var_name in ["filters", "weights"]: + # var_name = self.name + "/" + var_name + # w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=False, trainable=trainable, var_name=var_name) + # elif var_name in ["biases"]: + # var_name = self.name + "/" + var_name + # w_tmp = self.create_parameter(shape=shape, attr=init, is_bias=True, trainable=trainable, var_name=var_name) + # else: + var_name = self.name + "/" + var_name + w_tmp = self.create_parameter(shape=shape, attr=init, var_name=var_name, trainable=trainable) self.trainable = trainable + return w_tmp - def create_parameter(self, - shape, - attr=None, - dtype=None, - is_bias=False, - default_initializer=None): + def create_parameter( + self, shape, attr=None, dtype=None, is_bias=False, default_initializer=None, trainable=True, var_name=None + ): """Create parameters for this layer.""" - temp_attr = copy.deepcopy(attr) + init_attr = pd.ParamAttr(name=var_name, initializer=attr, trainable=trainable, do_model_average=True) + temp_attr = copy.deepcopy(init_attr) if 
isinstance(temp_attr, six.string_types) and temp_attr == "": temp_attr = None - return self._helper.create_parameter(temp_attr, shape, dtype, is_bias, - default_initializer) + return self._helper.create_parameter(temp_attr, shape, dtype, is_bias, default_initializer) @property def all_weights(self): - ret = [ - param - for _, param in self.named_parameters( - include_sublayers=True) - ] + ret = [param for _, param in self.named_parameters(include_sublayers=True)] return ret @property @@ -231,4 +242,11 @@ class Module(Layer): (2) Automatic shape inference when the user does not enter inchannels. """ - self.forward(*inputs, **kwargs) \ No newline at end of file + self.forward(*inputs, **kwargs) + + def save_weights(self, file_path, format=None): + _save_weights(net=self, file_path=file_path, format=format) + + def load_weights(self, file_path, format=None, in_order=True, skip=False): + """Load model weights from a given file, which should be previously saved by self.save_weights().""" + _load_weights(net=self, file_path=file_path, format=format, in_order=in_order, skip=skip) diff --git a/tensorlayer/layers/core/core_tensorflow.py b/tensorlayer/layers/core/core_tensorflow.py index 0f70388..113db2d 100644 --- a/tensorlayer/layers/core/core_tensorflow.py +++ b/tensorlayer/layers/core/core_tensorflow.py @@ -16,6 +16,30 @@ Parameter_ = tf.Variable class Module(object): + """The basic :class:`Module` class represents a single layer of a neural network. + It should be subclassed when implementing new types of layers. + Parameters + ---------- + name : str or None + A unique layer name. If None, a unique name will be automatically assigned. + Methods + --------- + __init__() + Initializing the Layer. + __call__() + Forwarding the computation. + all_weights() + Return a list of Tensor which are all weights of this Layer. + trainable_weights() + Return a list of Tensor which are all trainable weights of this Layer. 
+ nontrainable_weights() + Return a list of Tensor which are all nontrainable weights of this Layer. + build() + Abstract method. Build the Layer. All trainable weights should be defined in this function. + forward() + Abstract method. Forward computation and return computation results. + + """ def __init__(self, name=None, act=None, *args, **kwargs): self._params = OrderedDict() @@ -51,7 +75,8 @@ class Module(object): str_act = str2act(act) if act: - if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or len(act) > 10 and act[0:10] == "leaky_relu"): + if isinstance(act, str) and (len(act) > 5 and act[0:5] == "lrelu" or + len(act) > 10 and act[0:10] == "leaky_relu"): self.act = str_act elif isinstance(act, str): self.act = str_act() @@ -68,9 +93,9 @@ class Module(object): self._nodes_fixed = False # Layer weight state - self._all_weights = [] - self._trainable_weights = [] - self._nontrainable_weights = [] + self._all_weights = None + self._trainable_weights = None + self._nontrainable_weights = None # layer forward state self._forward_state = False @@ -83,7 +108,9 @@ class Module(object): Sets the extended representation of the Module. To print customized extended information, re-implement this method in your own Layers. + """ + return '' def __repr__(self): @@ -123,7 +150,6 @@ class Module(object): del self.__dict__[name] if params and name in params: raise TypeError("Expected type is Parameter, but got Module.") - # TODO How to prompt the user, enter the in_channels. # TODO Automatic shape inference when the user does not enter inchannels. 
# if value._built is False: # raise AttributeError( @@ -146,8 +172,9 @@ class Module(object): def build(self, inputs_shape): raise Exception("The build(self, inputs_shape) method must be implemented by inherited class") - def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True): + def _get_weights(self, var_name, shape, init=tl.initializers.random_normal(), trainable=True, transposed=None): """ Get trainable variables. """ + weight = get_variable_with_initializer( scope_name=self.name, var_name=var_name, shape=shape, init=init, trainable=trainable ) @@ -156,10 +183,12 @@ class Module(object): def save_weights(self, file_path, format=None): """Input file_path, save model weights into a file of given format.""" + _save_weights(self, file_path, format) def load_weights(self, file_path, format=None, in_order=True, skip=False): """Load model weights from a given file, which should be previously saved by self.save_weights().""" + _load_weights(self, file_path, format, in_order, skip) def _set_mode_for_layers(self, is_train): @@ -171,12 +200,12 @@ class Module(object): Network's mode. True means training mode while False means evaluation mode. """ + layers = self.layers_and_names(name_prefix='') for layer_name, layer in layers: if isinstance(layer, Module): layer.is_train = is_train - def set_train(self): """Set this network in training mode. After calling this method, all layers in network are in training mode, in particular, BatchNorm, Dropout, etc. 
@@ -188,6 +217,7 @@ class Module(object): >>> net.set_train() """ + if self.is_train !=True: self.is_train = True self._set_mode_for_layers(True) @@ -200,22 +230,15 @@ class Module(object): -------- >>> import tensorlayer as tl >>> net = tl.vgg16() - >>> net.eval() + >>> net.set_eval() # do evaluation """ + if self.is_train != False: self.is_train = False self._set_mode_for_layers(False) - def test(self): - """Set this network in evaluation mode.""" - self.eval() - - def infer(self): - """Set this network in evaluation mode.""" - self.eval() - @staticmethod def _compute_shape(tensors): if isinstance(tensors, list): @@ -231,16 +254,17 @@ class Module(object): Inserts a parameter with given name to the layer. Please refer to the usage in source code of `tensorlayer.layer.Module.__setattr__`. - Args: - param_name (str): Name of the parameter. - param (Parameter): Parameter to be inserted to the layer. - check_name (bool): Determines whether the name input is compatible. Default: True. + Parameters + ---------- + param_name : str + Name of the parameter. + param : Parameter + Parameter to be inserted to the layer. + check_name : bool + Determines whether the name input is compatible. Default: True. - Raises: - KeyError: If the name of parameter is null or contains dot. - AttributeError: If user did not call init() first. - TypeError: If the type of parameter is not Parameter_. """ + if not param_name: raise KeyError("The name of parameter should not be null.") if check_name and '.' in param_name: @@ -271,6 +295,7 @@ class Module(object): Output tensors to this layer. """ + raise NotImplementedError @property @@ -304,73 +329,87 @@ class Module(object): def trainable_weights(self): """ Returns all trainable weights. - Returns a list of all trainable parmeters. - Args: - recurse (bool): Whether contains the trainable weights of sublayers. Default: True. - - Returns: - List, the list of trainable weights. 
""" - self.get_weights() - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - params = layer._params.items() - params_status = layer._params_status.items() - params_zip = zip(params, params_status) - for params, params_status in params_zip: - if params_status[1] ==True: - self._trainable_weights.append(params[1]) + + if self._trainable_weights is not None and len(self._trainable_weights) > 0: + # self._trainable_weights already extracted, so do nothing + pass + else: + self._trainable_weights = [] + layers = self.layers_and_names(name_prefix='') + for layer_name, layer in layers: + params = layer._params.items() + params_status = layer._params_status.items() + params_zip = zip(params, params_status) + for params, params_status in params_zip: + if params_status[1] ==True: + self._trainable_weights.append(params[1]) return self._trainable_weights @property def nontrainable_weights(self): """ Returns all untrainable weights. - Returns a list of all untrainable weights. - Args: - recurse (bool): Whether contains the untrainable weights of sublayers. Default: True. - - Returns: - List, the list of untrainable weights. 
""" - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - params = layer._params.items() - params_status = layer._params_status.items() - params_zip = zip(params, params_status) - for params, params_status in params_zip: - if params_status[1] == False: - self._nontrainable_weights.append(params[1]) + + if self._nontrainable_weights is not None and len(self._nontrainable_weights) > 0: + # self._nontrainable_weights already extracted, so do nothing + pass + else: + self._nontrainable_weights = [] + layers = self.layers_and_names(name_prefix='') + for layer_name, layer in layers: + params = layer._params.items() + params_status = layer._params_status.items() + params_zip = zip(params, params_status) + for params, params_status in params_zip: + if params_status[1] == False: + self._nontrainable_weights.append(params[1]) return self._nontrainable_weights @property def all_weights(self): - layers = self.layers_and_names(name_prefix='') - for layer_name, layer in layers: - params = layer._params.items() - for par, val in params: - self._all_weights.append(val) + """ + Returns all weights. + Returns a list of all weights. + + """ + + if self._all_weights is not None and len(self._all_weights) > 0: + # self._all_weights already extracted, so do nothing + pass + else: + self._all_weights = [] + layers = self.layers_and_names(name_prefix='') + for layer_name, layer in layers: + params = layer._params.items() + for par, val in params: + self._all_weights.append(val) return self._all_weights def get_weights(self, expand=True): """ Returns an iterator over layer weights. - Yields weights of this layer. If `expand` is True, yield parameters of this layer and all sublayers. - Args: - expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters - that are direct members of this layer. Default: True. + Parameters + ---------- + expand : bool + If True, yields parameters of this layer and all sublayers. 
Otherwise, yields only parameters + that are direct members of this layer. Default: True. + + Examples + --------- + >>> net = Net() + >>> for item in net.get_weights(): + >>> print(item) - Examples: - >>> net = Net() - >>> for item in net.get_weights(): - >>> print(item) """ + for _, param in self.parameters_and_names(expand=expand): yield param @@ -387,14 +426,15 @@ class Module(object): """ Adds a child layer to the current layer. - Args: - child_name (str): Name of the child layer. - child (Module): The child layer to be inserted. + Parameters + ---------- + child_name : str + Name of the child layer. + child : Module + The child layer to be inserted. - Raises: - KeyError: Child Module's name is incorrect or duplicated with the other child name. - TypeError: Child Module's type is incorrect. """ + if not child_name or '.' in child_name: raise KeyError("Child layer name is incorrect.") if hasattr(self, child_name) and child_name not in self._layers: @@ -409,18 +449,24 @@ class Module(object): Includes the parameter's name and itself. - Args: - name_prefix (str): Namespace. Default: ''. - expand (bool): If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters - that are direct members of this layer. Default: True. + Parameters + ---------- + name_prefix : str + Namespace. Default: ''. + expand : bool + If True, yields parameters of this layer and all sublayers. Otherwise, yields only parameters + that are direct members of this layer. Default: True. + + Examples + --------- + >>> n = Net() + >>> names = [] + >>> for m in n.parameters_and_names(): + >>> if m[0]: + >>> names.append(m[0]) - Examples: - >>> n = Net() - >>> names = [] - >>> for m in n.parameters_and_names(): - >>> if m[0]: - >>> names.append(m[0]) """ + layers = [] if expand: layers = self.layers_and_names(name_prefix=name_prefix) @@ -447,17 +493,23 @@ class Module(object): Includes the layer's name and itself. - Args: - layers (str): layers to iterate over. 
Default: None. - name_prefix (str): Namespace. Default: ''. + Parameters + ---------- + layers : str + layers to iterate over. Default: None. + name_prefix : str + Namespace. Default: ''. + + Examples + --------- + >>> n = Net() + >>> names = [] + >>> for m in n.layers_and_names(): + >>> if m[0]: + >>> names.append(m[0]) - Examples: - >>> n = Net() - >>> names = [] - >>> for m in n.layers_and_names(): - >>> if m[0]: - >>> names.append(m[0]) """ + t_layers = layers if layers else set() if self in t_layers: return @@ -475,6 +527,7 @@ class Module(object): def layers(self): """Returns an iterator over immediate layers.""" + return self.name_layers().values() def name_layers(self): @@ -483,6 +536,7 @@ class Module(object): Include name of the layer and layer itself. """ + value_set = set() layers = OrderedDict() for name, layer in self._layers.items(): @@ -494,7 +548,7 @@ class Module(object): def init_build(self, *inputs, **kwargs): """ (1) This method must be called when the Layer has no input in_channels. - (2) Automatic shape inference when the user does not enter inchannels. + (2) Automatic shape inference when the user does not enter in_channels. """ self.forward(*inputs, **kwargs) @@ -502,31 +556,35 @@ class Module(object): class SequentialLayer(Module): """ - Sequential layer container. - - A list of Layers will be added to it in the order they are passed in the constructor. - Alternatively, an ordered dict of layers can also be passed in. - - Args: - args (list, OrderedDict): List of subclass of Module. - - Raises: - TypeError: If the type of the argument is not list or OrderedDict. - - Inputs: - - **input** (Tensor) - Tensor with shape according to the first Module in the sequence. - - Outputs: - Tensor, the output Tensor with shape depending on the input and defined sequence of Layers. + The class :class:`SequentialLayer` is a linear stack of layers. + The :class:`SequentialLayer` can be created by passing a list of layer instances. 
+ The given layer instances will be automatically connected one by one. + Parameters + ---------- + layers: list of Layer + A list of layers. + name : str or None + A unique layer name. If None, a unique name will be automatically assigned. + Methods + --------- + __init__() + Initializing the LayerList. + weights() + A collection of weights of all the layer instances. + build() + Build the LayerList. The layer instances will be connected automatically one by one. + forward() + Forward the computation. The computation will go through all layer instances. - Examples: - >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') - >>> bn = tl.layers.BatchNorm2d(2) - >>> seq = tl.layers.SequentialLayer([conv, bn]) - >>> - >>> x = tl.layers.Input((1, 3, 4, 4)) - >>> seq(x) + Examples + --------- + >>> conv = tl.layers.Conv2d(3, 2, 3, pad_mode='valid') + >>> bn = tl.layers.BatchNorm2d(2) + >>> seq = tl.layers.SequentialLayer([conv, bn]) + >>> x = tl.layers.Input((1, 3, 4, 4)) + >>> seq(x) """ + def __init__(self, *args): super(SequentialLayer, self).__init__() self._built = True @@ -547,21 +605,20 @@ class SequentialLayer(Module): def __getitem__(self, index): if isinstance(index, slice): - return self.__class__( - OrderedDict(list(self._layers.items())[index])) - index = self._valid_index(len(self), index) + return self.__class__(OrderedDict(list(self._layers.items())[index])) + index = _valid_index(len(self), index) return list(self._layers.values())[index] def __setitem__(self, index, layer): - if self._valid_module(layer): - index = self._valid_index(len(self), index) + if _valid_module(layer): + index = _valid_index(len(self), index) key = list(self._layers.keys())[index] self._layers[key] = layer self.layer_list = list(self._layers.values()) def __delitem__(self, index): if isinstance(index, int): - index = self._valid_index(len(self), index) + index = _valid_index(len(self), index) key = list(self._layers.keys())[index] del self._layers[key] elif isinstance(index, 
slice): @@ -575,9 +632,8 @@ class SequentialLayer(Module): def __len__(self): return len(self._layers) - def append(self, layer): - if self._valid_module(layer): + if _valid_module(layer): self._layers[str(len(self))] = layer self.layer_list = list(self._layers.values()) return self @@ -590,176 +646,131 @@ class SequentialLayer(Module): input_data = layer(input_data) return input_data - def _valid_index(self, layer_num, index): - if not isinstance(index, int): - raise TypeError("Index {} is not int type") - if not -layer_num <= index < layer_num: - raise IndexError("Index should be a number in range [{}, {}), but got {}" - .format(-layer_num, layer_num, index)) - return index % layer_num - - def _valid_module(self, layer): - if issubclass(layer.__class__, Module): - return True - raise TypeError('Module {} is not subclass of Module'.format(layer)) - class LayerList(Module): """ - The class :class:`LayerList` is a linear stack of layers. + Holds Modules in a list. - The :class:`LayerList` can be created by passing a list of layer instances. - The given layer instances will be automatically connected one by one. + LayerList can be used like a regular Python list, support + '__getitem__', '__setitem__', '__delitem__', '__len__', '__iter__' and '__iadd__', + but module it contains are properly registered, and will be visible by all Modules methods. Parameters ---------- - layers: list of Layer - A list of layers. - name : str or None - A unique layer name. If None, a unique name will be automatically assigned. - + args : list + List of subclass of Module. Methods --------- __init__() - Initializing the LayerList. - weights() - A collection of weights of all the layer instances. - build() - Build the LayerList. The layer instances will be connected automatically one by one. - forward() - Forward the computation. The computation will go through all layer instances. + Initializing the Layer. + insert() + Inserts a given layer before a given index in the list. 
+ extend() + Appends layers from a Python iterable to the end of the list. + append() + Appends a given layer to the end of the list. + + Examples + --------- + Args: + args (list, optional): List of subclass of Module. + + Examples: + """ - def __init__(self, layers, name=None): - """ - Initializing the LayerList given a list of Layer. - - :param layers: list of Layer - :param name: str or None - """ - - super(LayerList, self).__init__(name=name) - self.layers = layers - is_built = True - for layer in self.layers: - self._trainable_weights.extend(layer.trainable_weights) - self._nontrainable_weights.extend(layer.nontrainable_weights) - if layer._built is False: - is_built = False - # if layer._built and layer.all_weights is not None: - # # some layers in the list passed in have already been built - # # e.g. using input shape to construct layers in dynamic eager - # if self._all_weights is None: - # self._all_weights = list() - # self._all_weights.extend(layer.all_weights) - if is_built: - self._built = True - - logging.info( - "LayerList %s including layers [%s]" % (self.name, ', '.join([layer.name for layer in self.layers])) - ) + def __init__(self, args): + super(LayerList, self).__init__() + self.extend(args) - # check layer name uniqueness in LayerList - local_layer_name_set = set() - for layer in self.layers: - if layer.name not in local_layer_name_set: - local_layer_name_set.add(layer.name) - else: - raise ValueError( - 'Layer name \'%s\' has already been used by another layer. Please change the layer name.' 
% - layer.name - ) + def __getitem__(self, index): + if isinstance(index, slice): + return self.__class__(list(self._layers.values())[index]) + if isinstance(index, int): + index = _valid_index(len(self), index) + return self._layers[str(index)] + raise TypeError('Index {} is not int type or slice type'.format(index)) + + def __setitem__(self, index, layer): + if not isinstance(index, int): + raise TypeError('Index {} is not int type'.format(index)) + _valid_module(layer) + index = _valid_index(len(self), index) + self._layers[str(index)] = layer - def __getitem__(self, idx): - if isinstance(idx, slice): - return LayerList(list(self.layers)[idx]) + def __delitem__(self, index): + if isinstance(index, int): + index = _valid_index(len(self), index) + del self._layers[str(index)] + elif isinstance(index, slice): + keys = list(self._layers.keys())[index] + for key in keys: + del self._layers[key] else: - return self.layers[idx] + raise TypeError('Index {} is not int type or slice type'.format(index)) + temp_dict = OrderedDict() + for idx, layer in enumerate(self._layers.values()): + temp_dict[str(idx)] = layer + self._layers = temp_dict def __len__(self): - return len(self.layers) + return len(self._layers) - def __repr__(self): - tmpstr = 'LayerList' + '(\n' - for idx, layer in enumerate(self.layers): - modstr = layer.__repr__() - modstr = _addindent(modstr, 2) - tmpstr = tmpstr + ' (' + str(idx) + '): ' + modstr + '\n' + def __iter__(self): + return iter(self._layers.values()) - tmpstr = tmpstr + ')' - return tmpstr + def __iadd__(self, layers): + self.extend(layers) + return self - @property - def trainable_weights(self): - return self._trainable_weights + def insert(self, index, layer): + """ + Inserts a given layer before a given index in the list. 
- @property - def nontrainable_weights(self): - return self._nontrainable_weights + """ - @property - def all_weights(self): - return self._trainable_weights + self._nontrainable_weights - - # def build(self, inputs_shape): - # """ - # Build the LayerList. The layer instances will be connected automatically one by one. - # """ - # in_tensor = self._input_tensors - # # in_layer = self._input_layer - # for layer in self.layers: - # is_build = layer._built - # out_tensor = layer(in_tensor) - # # nlayer = layer(in_layer) - # if is_build is False and layer.all_weights is not None: - # if self._all_weights is None: - # self._all_weights = list() - # self._all_weights.extend(layer.all_weights) - # layer._built = True - # in_tensor = out_tensor - # # in_layer = nlayer - - def forward(self, inputs): + idx = _valid_index(len(self), index) + _valid_module(layer) + length = len(self) + while length > idx: + self._layers[str(length)] = self._layers[str(length - 1)] + length -= 1 + self._layers[str(idx)] = layer + + def extend(self, layers): """ - Forward the computation. The computation will go through all layer instances. + Appends layers from a Python iterable to the end of the list. + """ - z = inputs - for layer in self.layers: - z = layer.forward(z) - return z - def _set_mode_for_layers(self, is_train): - """Set training/evaluation mode for all layer instances.""" - self.is_train = is_train - for layer in self.layers: - if isinstance(layer, LayerList): - layer._set_mode_for_layers(is_train) - else: - layer.is_train = is_train + if not isinstance(layers, list): + raise TypeError('Modules {} should be list of sublayers'.format(layers)) + for layer in layers: + if _valid_module(layer): + self._layers[str(len(self))] = layer + return self + + def append(self, layer): + """ + Appends a given layer to the end of the list. 
+ + """ + + if _valid_module(layer): + self._layers[str(len(self))] = layer + + def forward(self, *inputs): + raise NotImplementedError + + +def _valid_index(layer_num, index): + if not isinstance(index, int): + raise TypeError("Index {} is not int type".format(index)) + if not -layer_num <= index < layer_num: + raise IndexError("Index should be a number in range [{}, {}), but got {}".format(-layer_num, layer_num, index)) + return index % layer_num + - def get_args(self): - init_args = {} - layers = self.layer_args["layers"] - init_args["layers"] = [layer.config for layer in layers] - init_args.update({"layer_type": "layerlist"}) - return init_args - -def tolist(tensors): - if isinstance(tensors, list) or isinstance(tensors, tuple): - ntensors = list() - for t in tensors: - ntensors += tolist(t) - return ntensors - else: - return [tensors] - -def _addindent(s_, numSpaces): - s = s_.split('\n') - # don't do anything for single-line stuff - if len(s) == 1: - return s_ - first = s.pop(0) - s = [(numSpaces * ' ') + line for line in s] - s = '\n'.join(s) - s = first + '\n' + s - return s \ No newline at end of file +def _valid_module(layer): + if issubclass(layer.__class__, Module): + return True + raise TypeError('Module {} is not subclass of Module'.format(layer)) diff --git a/tensorlayer/layers/dense/base_dense.py b/tensorlayer/layers/dense/base_dense.py index a047030..acc3447 100644 --- a/tensorlayer/layers/dense/base_dense.py +++ b/tensorlayer/layers/dense/base_dense.py @@ -34,10 +34,10 @@ class Dense(Module): With TensorLayer >>> net = tl.layers.Input([100, 50], name='input') - >>> dense = tl.layers.Dense(n_units=800, act=tl.ops.relu, in_channels=50, name='dense_1') + >>> dense = tl.layers.Dense(n_units=800, act=tl.ReLU, in_channels=50, name='dense_1') >>> print(dense) Dense(n_units=800, relu, in_channels='50', name='dense_1') - >>> tensor = tl.layers.Dense(n_units=800, act=tl.ops.relu, name='dense_2')(net) + >>> tensor = tl.layers.Dense(n_units=800, act=tl.ReLU, 
name='dense_2')(net) >>> print(tensor) tf.Tensor([...], shape=(100, 800), dtype=float32) @@ -47,7 +47,6 @@ class Dense(Module): """ - # @cell_attr_register def __init__( self, n_units, diff --git a/tensorlayer/layers/dense/binary_dense.py b/tensorlayer/layers/dense/binary_dense.py index 90c6e2b..24fab5c 100644 --- a/tensorlayer/layers/dense/binary_dense.py +++ b/tensorlayer/layers/dense/binary_dense.py @@ -34,6 +34,14 @@ class BinaryDense(Module): name : None or str A unique layer name. + Examples + -------- + >>> net = tl.layers.Input([10, 784], name='input') + >>> net = tl.layers.BinaryDense(n_units=800, act=tl.ReLU, name='relu1')(net) + >>> output shape :(10, 800) + >>> net = tl.layers.BinaryDense(n_units=10, name='output')(net) + >>> output shape : (10, 10) + """ def __init__( @@ -90,7 +98,6 @@ class BinaryDense(Module): self.matmul = tl.ops.MatMul() - def forward(self, inputs): if self._forward_state == False: if self._built == False: diff --git a/tensorlayer/layers/dense/dorefa_dense.py b/tensorlayer/layers/dense/dorefa_dense.py index bf35c14..f54a228 100644 --- a/tensorlayer/layers/dense/dorefa_dense.py +++ b/tensorlayer/layers/dense/dorefa_dense.py @@ -39,6 +39,14 @@ class DorefaDense(Module): name : a str A unique layer name. 
+ Examples + -------- + >>> net = tl.layers.Input([10, 784], name='input') + >>> net = tl.layers.DorefaDense(n_units=800, act=tl.ReLU, name='relu1')(net) + >>> output shape :(10, 800) + >>> net = tl.layers.DorefaDense(n_units=10, name='output')(net) + >>> output shape :(10, 10) + """ def __init__( @@ -113,4 +121,4 @@ class DorefaDense(Module): outputs = self.bias_add(outputs, self.b) if self.act: outputs = self.act(outputs) - return outputs \ No newline at end of file + return outputs diff --git a/tensorlayer/layers/dense/dropconnect.py b/tensorlayer/layers/dense/dropconnect.py index 178ea2c..3e0e3e2 100644 --- a/tensorlayer/layers/dense/dropconnect.py +++ b/tensorlayer/layers/dense/dropconnect.py @@ -38,13 +38,13 @@ class DropconnectDense(Module): Examples -------- - >>> net = tl.layers.Input([None, 784], name='input') - >>> net = tl.layers.DropconnectDense(keep=0.8, - ... n_units=800, act=tl.ReLU, name='relu1')(net) - >>> net = tl.layers.DropconnectDense(keep=0.5, - ... n_units=800, act=tl.ReLU, name='relu2')(net) - >>> net = tl.layers.DropconnectDense(keep=0.5, - ... n_units=10, name='output')(net) + >>> net = tl.layers.Input([10, 784], name='input') + >>> net = tl.layers.DropconnectDense(keep=0.8, n_units=800, act=tl.ReLU, name='relu1')(net) + >>> output shape :(10, 800) + >>> net = tl.layers.DropconnectDense(keep=0.5, n_units=800, act=tl.ReLU, name='relu2')(net) + >>> output shape :(10, 800) + >>> net = tl.layers.DropconnectDense(keep=0.5, n_units=10, name='output')(net) + >>> output shape :(10, 10) References ---------- diff --git a/tensorlayer/layers/dense/quan_dense.py b/tensorlayer/layers/dense/quan_dense.py index 4604023..a055675 100644 --- a/tensorlayer/layers/dense/quan_dense.py +++ b/tensorlayer/layers/dense/quan_dense.py @@ -37,6 +37,14 @@ class QuanDense(Module): name : None or str A unique layer name. 
+ Examples + -------- + >>> net = tl.layers.Input([10, 784], name='input') + >>> net = tl.layers.BinaryDense(n_units=800, act=tl.ReLU, name='relu1')(net) + >>> output shape :(10, 800) + >>> net = tl.layers.BinaryDense(n_units=10, name='output')(net) + >>> output shape :(10, 10) + """ def __init__( diff --git a/tensorlayer/layers/dense/quan_dense_bn.py b/tensorlayer/layers/dense/quan_dense_bn.py index 3f811a2..0c40c7d 100644 --- a/tensorlayer/layers/dense/quan_dense_bn.py +++ b/tensorlayer/layers/dense/quan_dense_bn.py @@ -1,14 +1,13 @@ #! /usr/bin/python # -*- coding: utf-8 -*- - - import tensorlayer as tl from tensorlayer import logging from tensorlayer.layers.core import Module from tensorflow.python.training import moving_averages -from tensorlayer.layers.utils import (quantize_active_overflow, quantize_weight_overflow, - mean_var_with_update, w_fold, bias_fold) +from tensorlayer.layers.utils import ( + quantize_active_overflow, quantize_weight_overflow, mean_var_with_update, w_fold, bias_fold +) __all__ = [ 'QuanDenseWithBN', @@ -142,7 +141,6 @@ class QuanDenseWithBN(Module): "moving_variacne", shape=para_bn_shape, init=tl.initializers.constant(1.0), trainable=False ) - def forward(self, inputs): if self._forward_state == False: if self._built == False: diff --git a/tensorlayer/layers/deprecated.py b/tensorlayer/layers/deprecated.py index 2cb6699..cc44742 100644 --- a/tensorlayer/layers/deprecated.py +++ b/tensorlayer/layers/deprecated.py @@ -15,7 +15,7 @@ __all__ += [ 'PTRelu6Layer', ] -__log__ = '\n Hint: 1) downgrade TF and TL from version 2.x to 1.x. 2) check the documentation of TF and TL version 2.x' +__log__ = '\n Hint: 1) downgrade TL from version 3.x to 2.x. 
2) check the documentation of TF version 2.x and TL version 3.x' def PReluLayer(*args, **kwargs): @@ -414,3 +414,26 @@ __all__ += [ def TimeDistributedLayer(*args, **kwargs): # raise NonExistingLayerError("TimeDistributedLayer(x1, x2, name='a') --> TimeDistributed(name='a')(x1, x2)") raise NonExistingLayerError("TimeDistributedLayer is removed for TF 2.0, please use eager mode instead." + __log__) + + +__all__ += ['ModelLayer'] + + +def ModelLayer(*args, **kwargs): + raise NonExistingLayerError("ModelLayer is removed for TensorLayer 3.0.") + + +__all__ += ['Seq2seqLuongAttention'] + + +def Seq2seqLuongAttention(*args, **kwargs): + raise NonExistingLayerError("Seq2seqLuongAttention is removed for TensorLayer 3.0.") + + +__all__ += ['cross_entropy'] + + +def cross_entropy(*args, **kwargs): + raise NonExistingLayerError( + "cross_entropy(output, target) --> softmax_cross_entropy_with_logits(output, target)" + __log__ + ) diff --git a/tensorlayer/layers/dropout.py b/tensorlayer/layers/dropout.py index 8dccda6..54c9ba5 100644 --- a/tensorlayer/layers/dropout.py +++ b/tensorlayer/layers/dropout.py @@ -25,6 +25,11 @@ class Dropout(Module): name : None or str A unique layer name. + Examples + -------- + >>> net = tl.layers.Input([10, 200]) + >>> net = tl.layers.Dropout(keep=0.2)(net) + """ def __init__(self, keep, seed=0, name=None): #"dropout"): diff --git a/tensorlayer/layers/embedding.py b/tensorlayer/layers/embedding.py index a6b4313..84e4b56 100644 --- a/tensorlayer/layers/embedding.py +++ b/tensorlayer/layers/embedding.py @@ -4,7 +4,6 @@ import tensorlayer as tl from tensorlayer import logging from tensorlayer.layers.core import Module -# from tensorlayer.layers.core import LayersConfig __all__ = ['OneHot', 'Word2vecEmbedding', 'Embedding', 'AverageEmbedding'] @@ -25,21 +24,19 @@ class OneHot(Module): axis : None or int The axis. dtype : None or TensorFlow dtype - The data type, None means tf.float32. + The data type, None means tl.float32. 
name : str A unique layer name. Examples --------- - >>> import tensorflow as tf - >>> import tensorlayer as tl >>> net = tl.layers.Input([32], dtype=tl.int32) >>> onehot = tl.layers.OneHot(depth=8) >>> print(onehot) OneHot(depth=8, name='onehot') >>> tensor = tl.layers.OneHot(depth=8)(net) >>> print(tensor) - tf.Tensor([...], shape=(32, 8), dtype=float32) + Tensor([...], shape=(32, 8), dtype=float32) """ @@ -141,12 +138,11 @@ class Word2vecEmbedding(Module): -------- Word2Vec With TensorLayer (Example in `examples/text_word_embedding/tutorial_word2vec_basic.py`) - >>> import tensorflow as tf >>> import tensorlayer as tl >>> batch_size = 8 >>> embedding_size = 50 - >>> inputs = tl.layers.Input([batch_size], dtype=tf.int32) - >>> labels = tl.layers.Input([batch_size, 1], dtype=tf.int32) + >>> inputs = tl.layers.Input([batch_size], dtype=tl.int32) + >>> labels = tl.layers.Input([batch_size, 1], dtype=tl.int32) >>> emb_net = tl.layers.Word2vecEmbedding( >>> vocabulary_size=10000, >>> embedding_size=embedding_size, @@ -331,15 +327,14 @@ class Embedding(Module): Examples -------- - >>> import tensorflow as tf >>> import tensorlayer as tl - >>> input = tl.layers.Input([8, 100], dtype=tf.int32) + >>> input = tl.layers.Input([8, 100], dtype=tl.int32) >>> embed = tl.layers.Embedding(vocabulary_size=1000, embedding_size=50, name='embed') >>> print(embed) Embedding(vocabulary_size=1000, embedding_size=50) >>> tensor = embed(input) >>> print(tensor) - tf.Tensor([...], shape=(8, 100, 50), dtype=float32) + Tensor([...], shape=(8, 100, 50), dtype=float32) """ @@ -423,17 +418,16 @@ class AverageEmbedding(Module): Examples --------- - >>> import tensorflow as tf >>> import tensorlayer as tl >>> batch_size = 8 >>> length = 5 - >>> input = tl.layers.Input([batch_size, length], dtype=tf.int32) + >>> input = tl.layers.Input([batch_size, length], dtype=tl.int32) >>> avgembed = tl.layers.AverageEmbedding(vocabulary_size=1000, embedding_size=50, name='avg') >>> print(avgembed) 
AverageEmbedding(vocabulary_size=1000, embedding_size=50, pad_value=0) >>> tensor = avgembed(input) >>> print(tensor) - tf.Tensor([...], shape=(8, 50), dtype=float32) + Tensor([...], shape=(8, 50), dtype=float32) """ @@ -482,11 +476,11 @@ class AverageEmbedding(Module): init=self.E_init, ) self.embedding_lookup = tl.EmbeddingLookup() - self.not_equal = tl.Not_equal() + self.not_equal = tl.NotEqual() self.cast = tl.Cast(tl.float32) self.expand_dims = tl.ExpandDims(axis=-1) self.reduce_sum = tl.ReduceSum(axis=1) - self.count_nonzero = tl.Count_nonzero(keepdims=True, dtype=tl.float32) + self.count_nonzero = tl.CountNonzero(keepdims=True, dtype=tl.float32) def forward(self, inputs): """ @@ -505,7 +499,7 @@ class AverageEmbedding(Module): # Count number of non-padding words in each sentence sentence_lengths = self.count_nonzero(masks, axis=1) - + print(masks, sentence_lengths) sentence_embeddings = tl.ops.divide( sum_word_embeddings, sentence_lengths + 1e-8, # Add epsilon to avoid dividing by 0 @@ -514,4 +508,3 @@ class AverageEmbedding(Module): outputs = sentence_embeddings return outputs - diff --git a/tensorlayer/layers/extend.py b/tensorlayer/layers/extend.py index 9f765c5..9c48da2 100644 --- a/tensorlayer/layers/extend.py +++ b/tensorlayer/layers/extend.py @@ -2,7 +2,6 @@ # -*- coding: utf-8 -*- import tensorlayer as tl - from tensorlayer import logging from tensorlayer.layers.core import Module @@ -77,6 +76,7 @@ class Tile(Module): -------- >>> x = tl.layers.Input([10, 3], name='in') >>> y = tl.layers.Tile(multiples=[2, 3])(x) + """ def __init__(self, multiples=None, name=None): #'tile'): diff --git a/tensorlayer/layers/image_resampling.py b/tensorlayer/layers/image_resampling.py index a676a34..f017388 100644 --- a/tensorlayer/layers/image_resampling.py +++ b/tensorlayer/layers/image_resampling.py @@ -37,9 +37,9 @@ class UpSampling2d(Module): --------- With TensorLayer - >>> ni = tl.layers.Input([None, 50, 50, 32], name='input') + >>> ni = tl.layers.Input([10, 50, 
50, 32], name='input') >>> ni = tl.layers.UpSampling2d(scale=(2, 2))(ni) - >>> output shape : [None, 100, 100, 32] + >>> output shape : [10, 100, 100, 32] """ @@ -85,6 +85,7 @@ class UpSampling2d(Module): outputs = self.resize(inputs) return outputs + class DownSampling2d(Module): """The :class:`DownSampling2d` class is down-sampling 2D layer. @@ -111,21 +112,13 @@ class DownSampling2d(Module): --------- With TensorLayer - >>> ni = tl.layers.Input([None, 50, 50, 32], name='input') + >>> ni = tl.layers.Input([10, 50, 50, 32], name='input') >>> ni = tl.layers.DownSampling2d(scale=(2, 2))(ni) - >>> output shape : [None, 25, 25, 32] + >>> output shape : [10, 25, 25, 32] """ - def __init__( - self, - scale, - method='bilinear', - antialias=False, - data_format='channels_last', - name=None, - ksize=None - ): + def __init__(self, scale, method='bilinear', antialias=False, data_format='channels_last', name=None, ksize=None): super(DownSampling2d, self).__init__(name) self.method = method self.antialias = antialias @@ -153,8 +146,7 @@ class DownSampling2d(Module): def build(self, inputs_shape): scale = [1.0 / self.scale[0], 1.0 / self.scale[1]] self.resize = tl.ops.Resize( - scale=scale, method=self.method, antialias=self.antialias, data_format=self.data_format, - ksize=self.ksize + scale=scale, method=self.method, antialias=self.antialias, data_format=self.data_format, ksize=self.ksize ) def forward(self, inputs): @@ -167,4 +159,4 @@ class DownSampling2d(Module): """ outputs = self.resize(inputs) - return outputs \ No newline at end of file + return outputs diff --git a/tensorlayer/layers/inputs.py b/tensorlayer/layers/inputs.py index 34a8778..cbcd76f 100644 --- a/tensorlayer/layers/inputs.py +++ b/tensorlayer/layers/inputs.py @@ -64,6 +64,13 @@ def Input(shape, init=tl.initializers.ones(), dtype=tl.float32, name=None): name : None or str A unique layer name. 
+ Examples + --------- + With TensorLayer + + >>> ni = tl.layers.Input([10, 50, 50, 32], name='input') + >>> output shape : [10, 50, 50, 32] + """ input_layer = _InputLayer(shape, dtype=dtype, name=name, init=init) outputs = input_layer() diff --git a/tensorlayer/layers/lambda_layers.py b/tensorlayer/layers/lambda_layers.py index 1184f29..75f95c1 100644 --- a/tensorlayer/layers/lambda_layers.py +++ b/tensorlayer/layers/lambda_layers.py @@ -2,7 +2,6 @@ # -*- coding: utf-8 -*- import tensorflow as tf - from tensorlayer import logging from tensorlayer.files import utils from tensorlayer.layers.core import Module @@ -54,7 +53,7 @@ class Lambda(Module): Please avoid using Model.save() / Model.load() to save / load models that contain such Lambda layer. Instead, you may use Model.save_weights() / Model.load_weights() to save / load model weights. Note: In this case, fn_weights should be a list, and then the trainable weights in this Lambda layer can be added into the weights of the whole model. - >>> a = tf.Variable(1.0) + >>> a = tl.ops.Variable(1.0) >>> def func(x): >>> return x + a >>> x = tl.layers.Input([8, 3], name='input') @@ -65,15 +64,15 @@ class Lambda(Module): This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). 
>>> layers = [ - >>> tf.keras.layers.Dense(10, activation=tf.nn.relu), - >>> tf.keras.layers.Dense(5, activation=tf.nn.sigmoid), - >>> tf.keras.layers.Dense(1, activation=tf.identity) + >>> tl.layers.Dense(10, act=tl.Relu), + >>> tl.layers.Dense(5, act=tl.Relu), + >>> tl.layers.Dense(1, activation=tf.identity) >>> ] - >>> perceptron = tf.keras.Sequential(layers) + >>> perceptron = tl.layers.SequentialLayer(layers) >>> # in order to compile keras model and get trainable_variables of the keras model >>> _ = perceptron(np.random.random([100, 5]).astype(np.float32)) >>> - >>> class CustomizeModel(tl.models.Model): + >>> class CustomizeModel(tl.layers.Module): >>> def __init__(self): >>> super(CustomizeModel, self).__init__() >>> self.dense = tl.layers.Dense(in_channels=1, n_units=5) @@ -86,7 +85,7 @@ class Lambda(Module): >>> >>> optimizer = tl.optimizers.Adam(learning_rate=0.1) >>> model = CustomizeModel() - >>> model.train() + >>> model.set_train() >>> >>> for epoch in range(50): >>> with tf.GradientTape() as tape: @@ -185,7 +184,6 @@ class ElementwiseLambda(Module): Non-parametric and with args case This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). - >>> # z = mean + noise * tf.exp(std * 0.5) + foo >>> def func(noise, mean, std, foo=42): >>> return mean + noise * tf.exp(std * 0.5) + foo >>> noise = tl.layers.Input([100, 1]) @@ -197,7 +195,6 @@ class ElementwiseLambda(Module): Non-parametric and non-args case This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional). - >>> # z = mean + noise * tf.exp(std * 0.5) >>> noise = tl.layers.Input([100, 1]) >>> mean = tl.layers.Input([100, 1]) >>> std = tl.layers.Input([100, 1]) @@ -209,7 +206,6 @@ class ElementwiseLambda(Module): Please avoid using Model.save() / Model.load() to save / load models that contain such ElementwiseLambda layer. 
Instead, you may use Model.save_weights() / Model.load_weights() to save / load model weights. Note: In this case, fn_weights should be a list, and then the trainable weights in this ElementwiseLambda layer can be added into the weights of the whole model. - >>> # z = mean + noise * tf.exp(std * 0.5) + vara >>> vara = [tf.Variable(1.0)] >>> def func(noise, mean, std): >>> return mean + noise * tf.exp(std * 0.5) + vara @@ -277,4 +273,4 @@ class ElementwiseLambda(Module): else: outputs = self.fn(*inputs, **kwargs) - return outputs \ No newline at end of file + return outputs diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py index 8e41d5a..3fc5737 100644 --- a/tensorlayer/layers/merge.py +++ b/tensorlayer/layers/merge.py @@ -2,7 +2,6 @@ # -*- coding: utf-8 -*- import tensorlayer as tl - from tensorlayer import logging from tensorlayer.layers.core import Module @@ -27,8 +26,8 @@ class Concat(Module): >>> class CustomModel(Module): >>> def __init__(self): >>> super(CustomModel, self).__init__(name="custom") - >>> self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1') - >>> self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu2_1') + >>> self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu1_1') + >>> self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu2_1') >>> self.concat = tl.layers.Concat(concat_dim=1, name='concat_layer') >>> def forward(self, inputs): @@ -70,6 +69,7 @@ class Concat(Module): outputs = self.concat(inputs) return outputs + class Elementwise(Module): """A layer that combines multiple :class:`Layer` that have the same output shapes according to an element-wise operation. @@ -78,7 +78,7 @@ class Elementwise(Module): Parameters ---------- combine_fn : a TensorFlow element-wise combine function - e.g. AND is ``tf.minimum`` ; OR is ``tf.maximum`` ; ADD is ``tf.add`` ; MUL is ``tf.multiply`` and so on. + e.g. 
AND is ``tl.minimum`` ; OR is ``tl.maximum`` ; ADD is ``tl.add`` ; MUL is ``tl.multiply`` and so on. See `TensorFlow Math API `__ . If the combine function is more complicated, please consider to use :class:`ElementwiseLambda`. act : activation function @@ -91,9 +91,9 @@ class Elementwise(Module): >>> class CustomModel(tl.models.Model): >>> def __init__(self): >>> super(CustomModel, self).__init__(name="custom") - >>> self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1') - >>> self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu2_1') - >>> self.element = tl.layers.Elementwise(combine_fn=tf.minimum, name='minimum', act=tf.identity) + >>> self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu1_1') + >>> self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tl.ReLU, name='relu2_1') + >>> self.element = tl.layers.Elementwise(combine_fn=tl.minimum, name='minimum', act=tl.identity) >>> def forward(self, inputs): >>> d1 = self.dense1(inputs) @@ -139,4 +139,4 @@ class Elementwise(Module): outputs = self.combine_fn(outputs, input) if self.act: outputs = self.act(outputs) - return outputs \ No newline at end of file + return outputs diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py index 5ab2e89..613a19f 100644 --- a/tensorlayer/layers/normalization.py +++ b/tensorlayer/layers/normalization.py @@ -11,6 +11,15 @@ __all__ = [ 'BatchNorm2d', 'BatchNorm3d', ] +# TODO Layers that needs to be updated +# ['InstanceNorm', +# 'InstanceNorm1d', +# 'InstanceNorm2d', +# 'InstanceNorm3d', +# 'LayerNorm', +# 'GroupNorm', +# 'SwitchNorm', +# ] class BatchNorm(Module): @@ -52,7 +61,7 @@ class BatchNorm(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 32], name='input') >>> net = tl.layers.BatchNorm()(net) Notes @@ -106,7 +115,6 @@ class BatchNorm(Module): 
self.build(None) self._built = True - if self.decay < 0.0 or 1.0 < self.decay: raise ValueError("decay should be between 0 to 1") @@ -171,6 +179,7 @@ class BatchNorm(Module): self.act_init_flag = True def forward(self, inputs): + self._check_input_shape(inputs) if self._forward_state == False: if self._built == False: self.build(tl.get_tensor_shape(inputs)) @@ -180,8 +189,7 @@ class BatchNorm(Module): if not self.is_train: self.batchnorm = tl.ops.BatchNorm( decay=self.decay, epsilon=self.epsilon, beta=self.beta, gamma=self.gamma, moving_mean=self.moving_mean, - moving_var=self.moving_var, num_features=self.num_features, data_format=self.data_format, - is_train=False + moving_var=self.moving_var, num_features=self.num_features, data_format=self.data_format, is_train=False ) outputs = self.batchnorm(inputs=inputs) if self.act_init_flag: @@ -199,7 +207,7 @@ class BatchNorm1d(BatchNorm): With TensorLayer >>> # in static model, no need to specify num_features - >>> net = tl.layers.Input([None, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 32], name='input') >>> net = tl.layers.BatchNorm1d()(net) >>> # in dynamic model, build by specifying num_features >>> conv = tl.layers.Conv1d(32, 5, 1, in_channels=3) @@ -222,7 +230,7 @@ class BatchNorm2d(BatchNorm): With TensorLayer >>> # in static model, no need to specify num_features - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 32], name='input') >>> net = tl.layers.BatchNorm2d()(net) >>> # in dynamic model, build by specifying num_features >>> conv = tl.layers.Conv2d(32, (5, 5), (1, 1), in_channels=3) @@ -245,7 +253,7 @@ class BatchNorm3d(BatchNorm): With TensorLayer >>> # in static model, no need to specify num_features - >>> net = tl.layers.Input([None, 50, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 50, 32], name='input') >>> net = tl.layers.BatchNorm3d()(net) >>> # in dynamic model, build by specifying num_features >>> conv = 
tl.layers.Conv3d(32, (5, 5, 5), (1, 1), in_channels=3) diff --git a/tensorlayer/layers/padding.py b/tensorlayer/layers/padding.py index 5a21b50..84695b7 100644 --- a/tensorlayer/layers/padding.py +++ b/tensorlayer/layers/padding.py @@ -30,10 +30,10 @@ class PadLayer(Module): -------- With TensorLayer - >>> net = tl.layers.Input([None, 224, 224, 3], name='input') + >>> net = tl.layers.Input([10, 224, 224, 3], name='input') >>> padlayer = tl.layers.PadLayer([[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT", name='inpad')(net) >>> print(padlayer) - >>> output shape : (None, 230, 230, 3) + >>> output shape : (10, 230, 230, 3) """ @@ -88,10 +88,10 @@ class ZeroPad1d(Module): -------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 1], name='input') + >>> net = tl.layers.Input([10, 100, 1], name='input') >>> pad1d = tl.layers.ZeroPad1d(padding=(3, 3))(net) >>> print(pad1d) - >>> output shape : (None, 106, 1) + >>> output shape : (10, 106, 1) """ @@ -142,10 +142,10 @@ class ZeroPad2d(Module): -------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 100, 3], name='input') + >>> net = tl.layers.Input([10, 100, 100, 3], name='input') >>> pad2d = tl.layers.ZeroPad2d(padding=((3, 3), (4, 4)))(net) >>> print(pad2d) - >>> output shape : (None, 106, 108, 3) + >>> output shape : (10, 106, 108, 3) """ @@ -189,7 +189,8 @@ class ZeroPad3d(Module): padding : int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int, the same symmetric padding is applied to width and height. - If tuple of 2 ints, interpreted as two different symmetric padding values for height and width as ``(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)``. - - If tuple of 2 tuples of 2 ints, interpreted as ``((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))``. + - If tuple of 2 tuples of 2 ints, interpreted as + ``((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))``. 
name : None or str A unique layer name. @@ -197,10 +198,10 @@ class ZeroPad3d(Module): -------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 100, 100, 3], name='input') + >>> net = tl.layers.Input([10, 100, 100, 100, 3], name='input') >>> pad3d = tl.layers.ZeroPad3d(padding=((3, 3), (4, 4), (5, 5)))(net) >>> print(pad3d) - >>> output shape : (None, 106, 108, 110, 3) + >>> output shape : (10, 106, 108, 110, 3) """ diff --git a/tensorlayer/layers/pooling.py b/tensorlayer/layers/pooling.py index 006b34d..560d9ec 100644 --- a/tensorlayer/layers/pooling.py +++ b/tensorlayer/layers/pooling.py @@ -55,9 +55,9 @@ class PoolLayer(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 32], name='input') >>> net = tl.layers.PoolLayer()(net) - >>> output shape : [None, 25, 25, 32] + >>> output shape : [10, 25, 25, 32] """ @@ -118,9 +118,9 @@ class MaxPool1d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 32], name='input') >>> net = tl.layers.MaxPool1d(filter_size=3, strides=2, padding='SAME', name='maxpool1d')(net) - >>> output shape : [None, 25, 32] + >>> output shape : [10, 25, 32] """ @@ -130,7 +130,6 @@ class MaxPool1d(Module): strides=2, padding='SAME', data_format='channels_last', - dilation_rate=1, name=None # 'maxpool1d' ): super().__init__(name) @@ -138,7 +137,6 @@ class MaxPool1d(Module): self.strides = self._strides = strides self.padding = padding self.data_format = data_format - self.dilation_rate = self._dilation_rate = dilation_rate self.build() self._built = True @@ -150,8 +148,6 @@ class MaxPool1d(Module): def __repr__(self): s = ('{classname}(filter_size={filter_size}' ', strides={strides}, padding={padding}') - if self.dilation_rate != 1: - s += ', dilation={dilation_rate}' if self.name is not None: s += ', name=\'{name}\'' s += ')' @@ -167,18 +163,12 @@ class MaxPool1d(Module): 
raise Exception("unsupported data format") self._filter_size = [self.filter_size] self._strides = [self.strides] - self._dilation_rate = [self.dilation_rate] + self.max_pool = tl.ops.MaxPool1d( + ksize=self._filter_size, strides=self._strides, padding=self.padding, data_format=self.data_format + ) def forward(self, inputs): - outputs = tl.ops.pool( - input=inputs, - window_shape=self._filter_size, - pooling_type="MAX", - strides=self._strides, - padding=self.padding, - data_format=self.data_format, - dilations=self._dilation_rate, - ) + outputs = self.max_pool(inputs) return outputs @@ -202,9 +192,9 @@ class MeanPool1d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 32], name='input') >>> net = tl.layers.MeanPool1d(filter_size=3, strides=2, padding='SAME')(net) - >>> output shape : [None, 25, 32] + >>> output shape : [10, 25, 32] """ @@ -222,7 +212,6 @@ class MeanPool1d(Module): self.strides = self._strides = strides self.padding = padding self.data_format = data_format - self.dilation_rate = self._dilation_rate = dilation_rate self.build() self._built = True @@ -234,8 +223,6 @@ class MeanPool1d(Module): def __repr__(self): s = ('{classname}(filter_size={filter_size}' ', strides={strides}, padding={padding}') - if self.dilation_rate != 1: - s += ', dilation={dilation_rate}' if self.name is not None: s += ', name=\'{name}\'' s += ')' @@ -251,13 +238,12 @@ class MeanPool1d(Module): raise Exception("unsupported data format") self._filter_size = [self.filter_size] self._strides = [self.strides] - self._dilation_rate = [self.dilation_rate] + self.avg_pool = tl.ops.AvgPool1d( + ksize=self._filter_size, strides=self._strides, padding=self.padding, data_format=self.data_format + ) def forward(self, inputs): - outputs = tl.ops.pool( - input=inputs, window_shape=self._filter_size, pooling_type="AVG", padding=self.padding, - dilations=self._dilation_rate, strides=self._strides, 
data_format=self.data_format - ) + outputs = self.avg_pool(inputs) return outputs @@ -281,9 +267,9 @@ class MaxPool2d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 32], name='input') >>> net = tl.layers.MaxPool2d(filter_size=(3, 3), strides=(2, 2), padding='SAME')(net) - >>> output shape : [None, 25, 25, 32] + >>> output shape : [10, 25, 25, 32] """ @@ -357,9 +343,9 @@ class MeanPool2d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 32], name='input') >>> net = tl.layers.MeanPool2d(filter_size=(3, 3), strides=(2, 2), padding='SAME')(net) - >>> output shape : [None, 25, 25, 32] + >>> output shape : [10, 25, 25, 32] """ @@ -437,9 +423,9 @@ class MaxPool3d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 50, 32], name='input') >>> net = tl.layers.MaxPool3d(filter_size=(3, 3, 3), strides=(2, 2, 2), padding='SAME')(net) - >>> output shape : [None, 25, 25, 25, 32] + >>> output shape : [10, 25, 25, 25, 32] """ @@ -481,15 +467,12 @@ class MaxPool3d(Module): self._strides = [1, 1, self.strides[0], self.strides[1], self.strides[2]] else: raise Exception("unsupported data format") + self.max_pool3d = tl.ops.MaxPool3d( + ksize=self.filter_size, strides=self._strides, padding=self.padding, data_format=self.data_format + ) def forward(self, inputs): - outputs = tl.ops.max_pool3d( - input=inputs, - ksize=self.filter_size, - strides=self._strides, - padding=self.padding, - data_format=self.data_format, - ) + outputs = self.max_pool3d(inputs) return outputs @@ -518,9 +501,9 @@ class MeanPool3d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 50, 50, 50, 32], name='input') + >>> net = tl.layers.Input([10, 50, 50, 50, 32], name='input') >>> net = tl.layers.MeanPool3d(filter_size=(3, 3, 3), 
strides=(2, 2, 2), padding='SAME')(net) - >>> output shape : [None, 25, 25, 25, 32] + >>> output shape : [10, 25, 25, 25, 32] """ @@ -561,15 +544,12 @@ class MeanPool3d(Module): self.data_format = 'NCDHW' else: raise Exception("unsupported data format") + self.avg_pool3d = tl.ops.AvgPool3d( + ksize=self.filter_size, strides=self._strides, padding=self.padding, data_format=self.data_format + ) def forward(self, inputs): - outputs = tl.ops.avg_pool3d( - input=inputs, - ksize=self.filter_size, - strides=self._strides, - padding=self.padding, - data_format=self.data_format, - ) + outputs = self.avg_pool3d(inputs) return outputs @@ -587,9 +567,9 @@ class GlobalMaxPool1d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 30], name='input') + >>> net = tl.layers.Input([10, 100, 30], name='input') >>> net = tl.layers.GlobalMaxPool1d()(net) - >>> output shape : [None, 30] + >>> output shape : [10, 30] """ @@ -643,9 +623,9 @@ class GlobalMeanPool1d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 30], name='input') + >>> net = tl.layers.Input([10, 100, 30], name='input') >>> net = tl.layers.GlobalMeanPool1d()(net) - >>> output shape : [None, 30] + >>> output shape : [10, 30] """ @@ -698,9 +678,9 @@ class GlobalMaxPool2d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 100, 30], name='input') + >>> net = tl.layers.Input([10, 100, 100, 30], name='input') >>> net = tl.layers.GlobalMaxPool2d()(net) - >>> output shape : [None, 30] + >>> output shape : [10, 30] """ @@ -753,9 +733,9 @@ class GlobalMeanPool2d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 100, 30], name='input') + >>> net = tl.layers.Input([10, 100, 100, 30], name='input') >>> net = tl.layers.GlobalMeanPool2d()(net) - >>> output shape : [None, 30] + >>> output shape : [10, 30] """ @@ -809,9 +789,9 @@ class GlobalMaxPool3d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 100, 100, 
30], name='input') + >>> net = tl.layers.Input([10, 100, 100, 100, 30], name='input') >>> net = tl.layers.GlobalMaxPool3d()(net) - >>> output shape : [None, 30] + >>> output shape : [10, 30] """ @@ -865,9 +845,9 @@ class GlobalMeanPool3d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 100, 100, 100, 30], name='input') + >>> net = tl.layers.Input([10, 100, 100, 100, 30], name='input') >>> net = tl.layers.GlobalMeanPool3d()(net) - >>> output shape : [None, 30] + >>> output shape : [10, 30] """ @@ -892,17 +872,17 @@ class GlobalMeanPool3d(Module): return s.format(classname=self.__class__.__name__, **self.__dict__) def build(self, inputs_shape=None): - pass - - def forward(self, inputs): if self.data_format == 'channels_last': - outputs = tl.reduce_mean(input_tensor=inputs, axis=[1, 2, 3]) + self.reduce_mean = tl.ReduceMean(axis=[1, 2, 3]) elif self.data_format == 'channels_first': - outputs = tl.reduce_mean(input_tensor=inputs, axis=[2, 3, 4]) + self.reduce_mean = tl.ReduceMean(axis=[2, 3, 4]) else: raise ValueError( "`data_format` should have one of the following values: [`channels_last`, `channels_first`]" ) + + def forward(self, inputs): + outputs = self.reduce_mean(inputs) return outputs @@ -921,9 +901,9 @@ class CornerPool2d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 32, 32, 8], name='input') + >>> net = tl.layers.Input([10, 32, 32, 8], name='input') >>> net = tl.layers.CornerPool2d(mode='TopLeft',name='cornerpool2d')(net) - >>> output shape : [None, 32, 32, 8] + >>> output shape : [10, 32, 32, 8] """ @@ -995,9 +975,9 @@ class AdaptiveMeanPool1d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 32, 3], name='input') + >>> net = tl.layers.Input([10, 32, 3], name='input') >>> net = tl.layers.AdaptiveMeanPool1d(output_size=16)(net) - >>> output shape : [None, 16, 3] + >>> output shape : [10, 16, 3] """ @@ -1050,9 +1030,9 @@ class AdaptiveMeanPool2d(Module): --------- With TensorLayer - >>> net = 
tl.layers.Input([None,32, 32, 3], name='input') + >>> net = tl.layers.Input([10,32, 32, 3], name='input') >>> net = tl.layers.AdaptiveMeanPool2d(output_size=16)(net) - >>> output shape : [None,16, 16, 3] + >>> output shape : [10,16, 16, 3] """ @@ -1108,9 +1088,9 @@ class AdaptiveMeanPool3d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None,32, 32, 32, 3], name='input') + >>> net = tl.layers.Input([10,32, 32, 32, 3], name='input') >>> net = tl.layers.AdaptiveMeanPool3d(output_size=16)(net) - >>> output shape : [None, 16, 16, 16, 3] + >>> output shape : [10, 16, 16, 16, 3] """ @@ -1166,9 +1146,9 @@ class AdaptiveMaxPool1d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 32, 3], name='input') + >>> net = tl.layers.Input([10, 32, 3], name='input') >>> net = tl.layers.AdaptiveMaxPool1d(output_size=16)(net) - >>> output shape : [None, 16, 3] + >>> output shape : [10, 16, 3] """ @@ -1221,9 +1201,9 @@ class AdaptiveMaxPool2d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None, 32, 32, 3], name='input') + >>> net = tl.layers.Input([10, 32, 32, 3], name='input') >>> net = tl.layers.AdaptiveMaxPool2d(output_size=16)(net) - >>> output shape : [None, 16, 16, 3] + >>> output shape : [10, 16, 16, 3] """ @@ -1278,9 +1258,9 @@ class AdaptiveMaxPool3d(Module): --------- With TensorLayer - >>> net = tl.layers.Input([None,32, 32, 32, 3], name='input') + >>> net = tl.layers.Input([10,32, 32, 32, 3], name='input') >>> net = tl.layers.AdaptiveMaxPool3d(output_size=16)(net) - >>> output shape : [None, 16, 16, 16, 3] + >>> output shape : [10, 16, 16, 16, 3] """ diff --git a/tensorlayer/layers/quantize.py b/tensorlayer/layers/quantize.py index 1a64f63..02107c7 100644 --- a/tensorlayer/layers/quantize.py +++ b/tensorlayer/layers/quantize.py @@ -1,9 +1,6 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- -import os -os.environ['TL_BACKEND'] = 'tensorflow' -import tensorlayer as tl from tensorlayer import logging from tensorlayer.layers.core import Module from tensorlayer.layers.utils import quantize diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py index 23d611a..5434cec 100644 --- a/tensorlayer/layers/recurrent.py +++ b/tensorlayer/layers/recurrent.py @@ -2,7 +2,1265 @@ # -*- coding: utf-8 -*- import numpy as np +import tensorflow as tf import tensorlayer as tl from tensorlayer import logging +from tensorlayer.decorators import deprecated_alias from tensorlayer.layers.core import Module + +# TODO: Need to update to version 3.0 +__all__ = [ + 'RNN', + 'SimpleRNN', + 'GRURNN', + 'LSTMRNN', + 'BiRNN', + # 'ConvRNNCell', + # 'BasicConvLSTMCell', + # 'ConvLSTM', + 'retrieve_seq_length_op', + 'retrieve_seq_length_op2', + 'retrieve_seq_length_op3', + 'target_mask_op', +] + + +class RNN(Module): + """ + The :class:`RNN` class is a fixed length recurrent layer for implementing simple RNN, + LSTM, GRU and etc. + + Parameters + ---------- + cell : TensorFlow cell function + A RNN cell implemented by tf.keras + - E.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell + - Note TF2.0+, TF1.0+ and TF1.0- are different + + return_last_output : boolean + Whether return last output or all outputs in a sequence. + + - If True, return the last output, "Sequence input and single output" + - If False, return all outputs, "Synced sequence input and output" + - In other word, if you want to stack more RNNs on this layer, set to False + + In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). + By default, `False`. + return_seq_2d : boolean + Only consider this argument when `return_last_output` is `False` + + - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. 
+ - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. + + In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). + By default, `False`. + return_last_state: boolean + Whether to return the last state of the RNN cell. The state is a list of Tensor. + For simple RNN and GRU, last_state = [last_output]; For LSTM, last_state = [last_output, last_cell_state] + + - If True, the layer will return outputs and the final state of the cell. + - If False, the layer will return outputs only. + + In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). + By default, `False`. + in_channels: int + Optional, the number of channels of the previous layer which is normally the size of embedding. + If given, the layer will be built when init. + If None, it will be automatically detected when the layer is forwarded for the first time. + name : str + A unique layer name. + + Examples + -------- + For synced sequence input and output, see `PTB example `__ + + A simple regression model below. + + >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) + >>> rnn_out, lstm_state = tl.layers.RNN( + >>> cell=tf.keras.layers.LSTMCell(units=hidden_size, dropout=0.1), + >>> in_channels=embedding_size, + >>> return_last_output=True, return_last_state=True, name='lstmrnn' + >>> )(inputs) + >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) + >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, lstm_state[0], lstm_state[1]], name='rnn_model') + >>> # If LSTMCell is applied, the lstm_state is [h, c] where h the hidden state and c the cell state of LSTM. + + A stacked RNN model.
+ + >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) + >>> rnn_out1 = tl.layers.RNN( + >>> cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), + >>> return_last_output=False, return_seq_2d=False, return_last_state=False + >>> )(inputs) + >>> rnn_out2 = tl.layers.RNN( + >>> cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), + >>> return_last_output=True, return_last_state=False + >>> )(rnn_out1) + >>> outputs = tl.layers.Dense(n_units=1)(rnn_out2) + >>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs) + + An example where the sequences have different length and contain padding. + Similar to the DynamicRNN in TL 1.x. + + If the `sequence_length` is provided in RNN's forwarding and both `return_last_output` and `return_last_state` + are set as `True`, the forward function will automatically ignore the paddings. Note that if `return_last_output` + is set as `False`, the synced sequence outputs will still include outputs which correspond with paddings, + but users are free to select which slice of outputs to be used in following procedure. + + The `sequence_length` should be a list of integers which indicates the length of each sequence. + It is recommended to use + `tl.layers.retrieve_seq_length_op3 `__ + to calculate the `sequence_length`.
+ + >>> data = [[[1], [2], [0], [0], [0]], [[1], [2], [3], [0], [0]], [[1], [2], [6], [1], [1]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> class DynamicRNNExample(tl.models.Model): + >>> def __init__(self): + >>> super(DynamicRNNExample, self).__init__() + >>> self.rnnlayer = tl.layers.RNN( + >>> cell=tf.keras.layers.SimpleRNNCell(units=6, dropout=0.1), in_channels=1, return_last_output=True, + >>> return_last_state=True + >>> ) + >>> def forward(self, x): + >>> z, s = self.rnnlayer(x, sequence_length=tl.layers.retrieve_seq_length_op3(x)) + >>> return z, s + >>> model = DynamicRNNExample() + >>> model.eval() + >>> output, state = model(data) + + + Notes + ----- + Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. + + """ + + def __init__( + self, + cell, + return_last_output=False, + return_seq_2d=False, + return_last_state=True, + in_channels=None, + name=None, # 'rnn' + ): + + super(RNN, self).__init__(name=name) + + self.cell = cell + self.return_last_output = return_last_output + self.return_seq_2d = return_seq_2d + self.return_last_state = return_last_state + + if in_channels is not None: + self.build((None, None, in_channels)) + self._built = True + + logging.info("RNN %s: cell: %s, n_units: %s" % (self.name, self.cell.__class__.__name__, self.cell.units)) + + def __repr__(self): + s = ('{classname}(cell={cellname}, n_units={n_units}') + s += ', name=\'{name}\'' + s += ')' + return s.format( + classname=self.__class__.__name__, cellname=self.cell.__class__.__name__, n_units=self.cell.units, + **self.__dict__ + ) + + def build(self, inputs_shape): + """ + Parameters + ---------- + inputs_shape : tuple + the shape of inputs tensor + """ + # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] + if len(inputs_shape) != 3: + raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") + + with tf.name_scope(self.name) as scope: + 
self.cell.build(tuple(inputs_shape)) + + if self._trainable_weights is None: + self._trainable_weights = list() + for var in self.cell.trainable_variables: + self._trainable_weights.append(var) + + # @tf.function + def forward(self, inputs, sequence_length=None, initial_state=None, **kwargs): + """ + Parameters + ---------- + inputs : input tensor + The input of a network + sequence_length: None or list of integers + The actual length of each sequence in batch without padding. + If provided, when `return_last_output` and `return_last_state` are `True`, + the RNN will perform in the manner of a dynamic RNN, i.e. + the RNN will return the actual last output / state without padding. + initial_state : None or list of Tensor (RNN State) + If None, `initial_state` is zero state. + + **kwargs: dict + Some attributes can be updated during forwarding + such as `return_last_output`, `return_seq_2d`, `return_last_state`. + """ + if kwargs: + for attr in kwargs: + if attr in self.__dict__: + setattr(self, attr, kwargs[attr]) + + batch_size = inputs.get_shape().as_list()[0] + total_steps = inputs.get_shape().as_list()[1] + + # checking the type and values of sequence_length + if sequence_length is not None: + if isinstance(sequence_length, list): + pass + elif isinstance(sequence_length, tf.Tensor): + pass + elif isinstance(sequence_length, np.ndarray): + sequence_length = sequence_length.tolist() + else: + raise TypeError( + "The argument sequence_length should be either None or a list of integers. " + "Type got %s" % type(sequence_length) + ) + if (len(sequence_length) != batch_size): + raise ValueError( + "The argument sequence_length should contain %d " % batch_size + + "elements indicating the initial length of each sequence, but got only %d. " % len(sequence_length) + ) + for i in sequence_length: + if not (type(i) is int or (isinstance(i, tf.Tensor) and i.dtype.is_integer)): + raise TypeError( + "The argument sequence_length should be either None or a list of integers. 
" + "One element of sequence_length has the type %s" % type(i) + ) + if i > total_steps: + raise ValueError( + "The actual length of a sequence should not be longer than " + "that of the longest sequence (total steps) in this mini-batch. " + "Total steps of this mini-batch %d, " % total_steps + + "but got an actual length of a sequence %d" % i + ) + + sequence_length = tl.layers.retrieve_seq_length_op3(inputs) + + sequence_length = [i - 1 if i >= 1 else 0 for i in sequence_length] + + # set warning + # if (not self.return_last_output) and sequence_length is not None: + # warnings.warn( + # 'return_last_output is set as %s ' % self.return_last_output + + # 'When sequence_length is provided, it is recommended to set as True. ' + + # 'Otherwise, padding will be considered while RNN is forwarding.' + # ) + + # return the last output, iterating each seq including padding ones. No need to store output during each + # time step. + if self.return_last_output and sequence_length is None: + outputs = [-1] + else: + outputs = list() + + # initialize the states if provided + states = initial_state if initial_state is not None else self.cell.get_initial_state(inputs) + if not isinstance(states, list): + states = [states] + + stored_states = list() + + # initialize the cell + self.cell.reset_dropout_mask() + self.cell.reset_recurrent_dropout_mask() + + # recurrent computation + # FIXME: if sequence_length is provided (dynamic rnn), only iterate max(sequence_length) times. 
+ for time_step in range(total_steps): + + cell_output, states = self.cell.call(inputs[:, time_step, :], states, training=self.is_train) + stored_states.append(states) + + if self.return_last_output and sequence_length is None: + outputs[-1] = cell_output + else: + outputs.append(cell_output) + + # prepare to return results + if self.return_last_output and sequence_length is None: + outputs = outputs[-1] + + elif self.return_last_output and sequence_length is not None: + outputs = tf.convert_to_tensor(outputs) + outputs = tf.gather(outputs, sequence_length, axis=0) + + outputs_without_padding = [] + for i in range(batch_size): + outputs_without_padding.append(outputs[i][i][:]) + outputs = tf.convert_to_tensor(outputs_without_padding) + else: + if self.return_seq_2d: + # PTB tutorial: stack dense layer after that, or compute the cost from the output + # 2D Tensor [batch_size * n_steps, n_hidden] + outputs = tf.reshape(tf.concat(outputs, 1), [-1, self.cell.units]) + else: + # : stack more RNN layer after that + # 3D Tensor [batch_size, n_steps, n_hidden] + outputs = tf.reshape(tf.concat(outputs, 1), [-1, total_steps, self.cell.units]) + + if self.return_last_state and sequence_length is None: + return outputs, states + elif self.return_last_state and sequence_length is not None: + + stored_states = tf.convert_to_tensor(stored_states) + stored_states = tf.gather(stored_states, sequence_length, axis=0) + + states = [] + for i in range(stored_states.shape[1]): + states.append(tf.convert_to_tensor([stored_states[b, i, b, :] for b in range(batch_size)])) + + return outputs, states + else: + return outputs + + +class SimpleRNN(RNN): + """ + The :class:`SimpleRNN` class is a fixed length recurrent layer for implementing simple RNN. + + Parameters + ---------- + units: int + Positive integer, the dimension of hidden space. + return_last_output : boolean + Whether return last output or all outputs in a sequence. 
+ - If True, return the last output, "Sequence input and single output" + - If False, return all outputs, "Synced sequence input and output" + - In other word, if you want to stack more RNNs on this layer, set to False + + In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). + By default, `False`. + return_seq_2d : boolean + Only consider this argument when `return_last_output` is `False` + - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. + - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. + + In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). + By default, `False`. + return_last_state: boolean + Whether to return the last state of the RNN cell. The state is a list of Tensor. + For simple RNN, last_state = [last_output] + + - If True, the layer will return outputs and the final state of the cell. + - If False, the layer will return outputs only. + + In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). + By default, `False`. + in_channels: int + Optional, the number of channels of the previous layer which is normally the size of embedding. + If given, the layer will be built when init. + If None, it will be automatically detected when the layer is forwarded for the first time. + name : str + A unique layer name. + `**kwargs`: + Advanced arguments to configure the simple RNN cell. + Please check tf.keras.layers.SimpleRNNCell. + + Examples + -------- + + A simple regression model below. + + >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) + >>> rnn_out, lstm_state = tl.layers.SimpleRNN( + >>> units=hidden_size, dropout=0.1, # both units and dropout are used to configure the simple rnn cell. 
+ >>> in_channels=embedding_size, + >>> return_last_output=True, return_last_state=True, name='simplernn' + >>> )(inputs) + >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) + >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, lstm_state[0]], name='rnn_model') + + Notes + ----- + Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. + + """ + + def __init__( + self, + units, + return_last_output=False, + return_seq_2d=False, + return_last_state=True, + in_channels=None, + name=None, # 'simplernn' + **kwargs + ): + super(SimpleRNN, self).__init__( + cell=tf.keras.layers.SimpleRNNCell(units=units, **kwargs), return_last_output=return_last_output, + return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name + ) + + +class GRURNN(RNN): + """ + The :class:`GRURNN` class is a fixed length recurrent layer for implementing RNN with GRU cell. + + Parameters + ---------- + units: int + Positive integer, the dimension of hidden space. + return_last_output : boolean + Whether return last output or all outputs in a sequence. + - If True, return the last output, "Sequence input and single output" + - If False, return all outputs, "Synced sequence input and output" + - In other word, if you want to stack more RNNs on this layer, set to False + + In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). + By default, `False`. + return_seq_2d : boolean + Only consider this argument when `return_last_output` is `False` + - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. + - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. + + In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). + By default, `False`. + return_last_state: boolean + Whether to return the last state of the RNN cell.
The state is a list of Tensor. + For GRU, last_state = [last_output] + + - If True, the layer will return outputs and the final state of the cell. + - If False, the layer will return outputs only. + + In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). + By default, `False`. + in_channels: int + Optional, the number of channels of the previous layer which is normally the size of embedding. + If given, the layer will be built when init. + If None, it will be automatically detected when the layer is forwarded for the first time. + name : str + A unique layer name. + `**kwargs`: + Advanced arguments to configure the GRU cell. + Please check tf.keras.layers.GRUCell. + + Examples + -------- + + A simple regression model below. + + >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) + >>> rnn_out, lstm_state = tl.layers.GRURNN( + >>> units=hidden_size, dropout=0.1, # both units and dropout are used to configure the GRU cell. + >>> in_channels=embedding_size, + >>> return_last_output=True, return_last_state=True, name='grurnn' + >>> )(inputs) + >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) + >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, lstm_state[0]], name='rnn_model') + + Notes + ----- + Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. + + """ + + def __init__( + self, + units, + return_last_output=False, + return_seq_2d=False, + return_last_state=True, + in_channels=None, + name=None, # 'grurnn' + **kwargs + ): + super(GRURNN, self).__init__( + cell=tf.keras.layers.GRUCell(units=units, **kwargs), return_last_output=return_last_output, + return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name + ) + + +class LSTMRNN(RNN): + """ + The :class:`LSTMRNN` class is a fixed length recurrent layer for implementing RNN with LSTM cell.
+ + Parameters + ---------- + units: int + Positive integer, the dimension of hidden space. + return_last_output : boolean + Whether return last output or all outputs in a sequence. + - If True, return the last output, "Sequence input and single output" + - If False, return all outputs, "Synced sequence input and output" + - In other word, if you want to stack more RNNs on this layer, set to False + + In a dynamic model, `return_last_output` can be updated when it is called in customised forward(). + By default, `False`. + return_seq_2d : boolean + Only consider this argument when `return_last_output` is `False` + - If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. + - If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. + + In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). + By default, `False`. + return_last_state: boolean + Whether to return the last state of the RNN cell. The state is a list of Tensor. + For LSTM, last_state = [last_output, last_cell_state] + + - If True, the layer will return outputs and the final state of the cell. + - If False, the layer will return outputs only. + + In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). + By default, `False`. + in_channels: int + Optional, the number of channels of the previous layer which is normally the size of embedding. + If given, the layer will be built when init. + If None, it will be automatically detected when the layer is forwarded for the first time. + name : str + A unique layer name. + `**kwargs`: + Advanced arguments to configure the LSTM cell. + Please check tf.keras.layers.LSTMCell. + + Examples + -------- + + A simple regression model below. 
+ + >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) + >>> rnn_out, lstm_state = tl.layers.LSTMRNN( + >>> units=hidden_size, dropout=0.1, # both units and dropout are used to configure the LSTM cell. + >>> in_channels=embedding_size, + >>> return_last_output=True, return_last_state=True, name='lstmrnn' + >>> )(inputs) + >>> outputs = tl.layers.Dense(n_units=1)(rnn_out) + >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, lstm_state[0]], name='rnn_model') + + Notes + ----- + Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see layer :class:`Reshape`. + + """ + + def __init__( + self, + units, + return_last_output=False, + return_seq_2d=False, + return_last_state=True, + in_channels=None, + name=None, # 'lstmrnn' + **kwargs + ): + super(LSTMRNN, self).__init__( + cell=tf.keras.layers.LSTMCell(units=units, **kwargs), return_last_output=return_last_output, + return_seq_2d=return_seq_2d, return_last_state=return_last_state, in_channels=in_channels, name=name + ) + + +class BiRNN(Module): + """ + The :class:`BiRNN` class is a fixed length Bidirectional recurrent layer. + + Parameters + ---------- + fw_cell : TensorFlow cell function for forward direction + A RNN cell implemented by tf.keras, e.g. tf.keras.layers.SimpleRNNCell, tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell. + Note TF2.0+, TF1.0+ and TF1.0- are different + bw_cell: TensorFlow cell function for backward direction similar with `fw_cell` + return_seq_2d : boolean. + If True, return 2D Tensor [batch_size * n_steps, n_hidden], for stacking Dense layer after it. + If False, return 3D Tensor [batch_size, n_steps, n_hidden], for stacking multiple RNN after it. + In a dynamic model, `return_seq_2d` can be updated when it is called in customised forward(). + By default, `False`. + return_last_state: boolean + Whether to return the last state of the two cells. The state is a list of Tensor.
+ - If True, the layer will return outputs, the final state of `fw_cell` and the final state of `bw_cell`. + - If False, the layer will return outputs only. + + In a dynamic model, `return_last_state` can be updated when it is called in customised forward(). + By default, `False`. + in_channels: int + Optional, the number of channels of the previous layer which is normally the size of embedding. + If given, the layer will be built when init. + If None, it will be automatically detected when the layer is forwarded for the first time. + name : str + A unique layer name. + + Examples + -------- + A simple regression model below. + + >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) + >>> # the fw_cell and bw_cell can be different + >>> rnnlayer = tl.layers.BiRNN( + >>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), + >>> bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1), + >>> return_seq_2d=True, return_last_state=True + >>> ) + >>> # if return_last_state=True, the final state of the two cells will be returned together with the outputs + >>> # if return_last_state=False, only the outputs will be returned + >>> rnn_out, rnn_fw_state, rnn_bw_state = rnnlayer(inputs) + >>> # if the BiRNN is followed by a Dense, return_seq_2d should be True. + >>> # if the BiRNN is followed by other RNN, return_seq_2d can be False. + >>> dense = tl.layers.Dense(n_units=1)(rnn_out) + >>> outputs = tl.layers.Reshape([-1, num_steps])(dense) + >>> rnn_model = tl.models.Model(inputs=inputs, outputs=[outputs, rnn_out, rnn_fw_state[0], rnn_bw_state[0]]) + + A stacked BiRNN model. 
+ + >>> inputs = tl.layers.Input([batch_size, num_steps, embedding_size]) + >>> rnn_out1 = tl.layers.BiRNN( + >>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), + >>> bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1), + >>> return_seq_2d=False, return_last_state=False + >>> )(inputs) + >>> rnn_out2 = tl.layers.BiRNN( + >>> fw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size, dropout=0.1), + >>> bw_cell=tf.keras.layers.SimpleRNNCell(units=hidden_size + 1, dropout=0.1), + >>> return_seq_2d=True, return_last_state=False + >>> )(rnn_out1) + >>> dense = tl.layers.Dense(n_units=1)(rnn_out2) + >>> outputs = tl.layers.Reshape([-1, num_steps])(dense) + >>> rnn_model = tl.models.Model(inputs=inputs, outputs=outputs) + + Notes + ----- + Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see layer :class:`Reshape`. + + """ + + def __init__( + self, + fw_cell, + bw_cell, + return_seq_2d=False, + return_last_state=False, + in_channels=None, + name=None, # 'birnn' + ): + super(BiRNN, self).__init__(name) + + self.fw_cell = fw_cell + self.bw_cell = bw_cell + self.return_seq_2d = return_seq_2d + self.return_last_state = return_last_state + + if in_channels is not None: + self.build((None, None, in_channels)) + self._built = True + + logging.info( + "BiRNN %s: fw_cell: %s, fw_n_units: %s, bw_cell: %s, bw_n_units: %s" % ( + self.name, self.fw_cell.__class__.__name__, self.fw_cell.units, self.bw_cell.__class__.__name__, + self.bw_cell.units + ) + ) + + def __repr__(self): + s = ( + '{classname}(fw_cell={fw_cellname}, fw_n_units={fw_n_units}' + ', bw_cell={bw_cellname}, bw_n_units={bw_n_units}' + ) + s += ', name=\'{name}\'' + s += ')' + return s.format( + classname=self.__class__.__name__, fw_cellname=self.fw_cell.__class__.__name__, + fw_n_units=self.fw_cell.units, bw_cellname=self.bw_cell.__class__.__name__, bw_n_units=self.bw_cell.units, + **self.__dict__ + ) + + def build(self, inputs_shape): + 
""" + Parameters + ---------- + inputs_shape : tuple + the shape of inputs tensor + """ + # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] + if len(inputs_shape) != 3: + raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") + + with tf.name_scope(self.name) as scope: + self.fw_cell.build(tuple(inputs_shape)) + self.bw_cell.build(tuple(inputs_shape)) + + if self._trainable_weights is None: + self._trainable_weights = list() + for var in self.fw_cell.trainable_variables: + self._trainable_weights.append(var) + for var in self.bw_cell.trainable_variables: + self._trainable_weights.append(var) + + # @tf.function + def forward(self, inputs, fw_initial_state=None, bw_initial_state=None, **kwargs): + """ + Parameters + ---------- + inputs : input tensor + The input of a network + fw_initial_state : None or list of Tensor (RNN State) + If None, `fw_initial_state` is zero state. + bw_initial_state : None or list of Tensor (RNN State) + If None, `bw_initial_state` is zero state. + **kwargs: dict + Some attributes can be updated during forwarding + such as `return_last_output`, `return_seq_2d`, `return_last_state`. 
+ """ + + if kwargs: + for attr in kwargs: + if attr in self.__dict__: + setattr(self, attr, kwargs[attr]) + + fw_outputs = list() + bw_outputs = list() + + fw_states = fw_initial_state if fw_initial_state is not None else self.fw_cell.get_initial_state(inputs) + bw_states = bw_initial_state if bw_initial_state is not None else self.bw_cell.get_initial_state(inputs) + + if not isinstance(fw_states, list): + fw_states = [fw_states] + if not isinstance(bw_states, list): + bw_states = [bw_states] + + total_steps = inputs.get_shape().as_list()[1] + + self.fw_cell.reset_dropout_mask() + self.fw_cell.reset_recurrent_dropout_mask() + self.bw_cell.reset_dropout_mask() + self.bw_cell.reset_recurrent_dropout_mask() + + for time_step in range(total_steps): + fw_cell_output, fw_states = self.fw_cell.call(inputs[:, time_step, :], fw_states, training=self.is_train) + bw_cell_output, bw_states = self.bw_cell.call( + inputs[:, -time_step - 1, :], bw_states, training=self.is_train + ) + + fw_outputs.append(fw_cell_output) + bw_outputs.append(bw_cell_output) + + if self.return_seq_2d: + # PTB tutorial: stack dense layer after that, or compute the cost from the output + # 2D Tensor [batch_size * n_steps, n_hidden] + fw_outputs = tf.reshape(tf.concat(fw_outputs, 1), [-1, self.fw_cell.units]) + bw_outputs = tf.reshape(tf.concat(bw_outputs, 1), [-1, self.bw_cell.units]) + else: + # : stack more RNN layer after that + # 3D Tensor [batch_size, n_steps, n_hidden] + fw_outputs = tf.reshape(tf.concat(fw_outputs, 1), [-1, total_steps, self.fw_cell.units]) + bw_outputs = tf.reshape(tf.concat(bw_outputs, 1), [-1, total_steps, self.bw_cell.units]) + + outputs = tf.concat([fw_outputs, bw_outputs], -1) + + if self.return_last_state: + return outputs, fw_states, bw_states + else: + return outputs + + +''' +class ConvRNNCell(object): + """Abstract object representing an Convolutional RNN Cell.""" + + def __call__(self, inputs, state, scope=None): + """Run this RNN cell on inputs, starting from the 
given state.""" + raise NotImplementedError("Abstract method") + + @property + def state_size(self): + """size(s) of state(s) used by this cell.""" + raise NotImplementedError("Abstract method") + + @property + def output_size(self): + """Integer or TensorShape: size of outputs produced by this cell.""" + raise NotImplementedError("Abstract method") + + def zero_state(self, batch_size): #, dtype=LayersConfig.tf_dtype): + """Return zero-filled state tensor(s). + Args: + batch_size: int, float, or unit Tensor representing the batch size. + Returns: + tensor of shape '[batch_size x shape[0] x shape[1] x num_features] + filled with zeros + + """ + dtype = LayersConfig.tf_dtype + shape = self.shape + num_features = self.num_features + # TODO : TypeError: 'NoneType' object is not subscriptable + zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype) + return zeros + + +class BasicConvLSTMCell(ConvRNNCell): + """Basic Conv LSTM recurrent network cell. + + Parameters + ----------- + shape : tuple of int + The height and width of the cell. + filter_size : tuple of int + The height and width of the filter + num_features : int + The hidden size of the cell + forget_bias : float + The bias added to forget gates (see above). + input_size : int + Deprecated and unused. + state_is_tuple : boolen + If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. + If False, they are concatenated along the column axis. The latter behavior will soon be deprecated. + act : activation function + The activation function of this layer, tanh as default. + + """ + + def __init__( + self, shape, filter_size, num_features, forget_bias=1.0, input_size=None, state_is_tuple=False, + act=tf.nn.tanh + ): + """Initialize the basic Conv LSTM cell.""" + # if not state_is_tuple: + # logging.warn("%s: Using a concatenated state is slower and will soon be " + # "deprecated. 
Use state_is_tuple=True.", self) + if input_size is not None: + logging.warn("%s: The input_size parameter is deprecated.", self) + self.shape = shape + self.filter_size = filter_size + self.num_features = num_features + self._forget_bias = forget_bias + self._state_is_tuple = state_is_tuple + self._activation = act + + @property + def state_size(self): + """State size of the LSTMStateTuple.""" + return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units) + + @property + def output_size(self): + """Number of units in outputs.""" + return self._num_units + + def __call__(self, inputs, state, scope=None): + """Long short-term memory cell (LSTM).""" + with tf.compat.v1.variable_scope(scope or type(self).__name__): # "BasicLSTMCell" + # Parameters of gates are concatenated into one multiply for efficiency. + if self._state_is_tuple: + c, h = state + else: + # print state + # c, h = tf.split(3, 2, state) + c, h = tf.split(state, 2, 3) + concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True) + + # i = input_gate, j = new_input, f = forget_gate, o = output_gate + # i, j, f, o = tf.split(3, 4, concat) + i, j, f, o = tf.split(concat, 4, 3) + + new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) * self._activation(j)) + new_h = self._activation(new_c) * tf.nn.sigmoid(o) + + if self._state_is_tuple: + new_state = LSTMStateTuple(new_c, new_h) + else: + new_state = tf.concat([new_c, new_h], 3) + return new_h, new_state + + +def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None): + """convolution: + + Parameters + ---------- + args : tensor + 4D Tensor or a list of 4D, batch x n, Tensors. + filter_size : tuple of int + Filter height and width. + num_features : int + Nnumber of features. + bias_start : float + Starting value to initialize the bias; 0 by default. + scope : VariableScope + For the created subgraph; defaults to "Linear". 
+ + Returns + -------- + - A 4D Tensor with shape [batch h w num_features] + + Raises + ------- + - ValueError : if some of the arguments has unspecified or wrong shape. + + """ + # Calculate the total size of arguments on dimension 1. + total_arg_size_depth = 0 + shapes = [a.get_shape().as_list() for a in args] + for shape in shapes: + if len(shape) != 4: + raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes)) + if not shape[3]: + raise ValueError("Linear expects shape[4] of arguments: %s" % str(shapes)) + else: + total_arg_size_depth += shape[3] + + dtype = [a.dtype for a in args][0] + + # Now the computation. + with tf.compat.v1.variable_scope(scope or "Conv"): + matrix = tf.compat.v1.get_variable( + "Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype + ) + if len(args) == 1: + res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME') + else: + res = tf.nn.conv2d(tf.concat(args, 3), matrix, strides=[1, 1, 1, 1], padding='SAME') + if not bias: + return res + bias_term = tf.compat.v1.get_variable( + "Bias", [num_features], dtype=dtype, + initializer=tf.compat.v1.initializers.constant(bias_start, dtype=dtype) + ) + return res + bias_term + + +class ConvLSTM(Module): + """A fixed length Convolutional LSTM layer. + + See this `paper `__ . + + Parameters + ---------- + prev_layer : :class:`Module` + Previous layer + cell_shape : tuple of int + The shape of each cell width * height + filter_size : tuple of int + The size of filter width * height + cell_fn : a convolutional RNN cell + Cell function like :class:`BasicConvLSTMCell` + feature_map : int + The number of feature map in the layer. + initializer : initializer + The initializer for initializing the parameters. + n_steps : int + The sequence length. + initial_state : None or ConvLSTM State + If None, `initial_state` is zero state. + return_last : boolean + Whether return last output or all outputs in each step. 
+ - If True, return the last output, "Sequence input and single output". + - If False, return all outputs, "Synced sequence input and output". + - In other word, if you want to stack more RNNs on this layer, set to False. + + return_seq_2d : boolean + Only consider this argument when `return_last_output` is `False` + - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. + - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. + + name : str + A unique layer name. + + Attributes + ---------- + outputs : tensor + The output of this RNN. return_last_output = False, outputs = all cell_output, which is the hidden state. + cell_output.get_shape() = (?, h, w, c]) + + final_state : tensor or StateTuple + The finial state of this layer. + - When state_is_tuple = False, it is the final hidden and cell states, + - When state_is_tuple = True, You can get the final state after each iteration during training, then feed it to the initial state of next iteration. + + initial_state : tensor or StateTuple + It is the initial state of this ConvLSTM layer, you can use it to initialize + your state at the beginning of each epoch or iteration according to your + training procedure. + + batch_size : int or tensor + Is int, if able to compute the batch_size, otherwise, tensor for ``?``. 
+ + """ + + @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release + def __init__( + self, + prev_layer, + cell_shape=None, + feature_map=1, + filter_size=(3, 3), + cell_fn=BasicConvLSTMCell, + initializer=tf.compat.v1.initializers.random_uniform(-0.1, 0.1), + n_steps=5, + initial_state=None, + return_last=False, + return_seq_2d=False, + name='convlstm', + ): + super(ConvLSTM, self).__init__(prev_layer=prev_layer, name=name) + + logging.info( + "ConvLSTM %s: feature_map: %d, n_steps: %d, " + "in_dim: %d %s, cell_fn: %s " % + (self.name, feature_map, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__) + ) + # You can get the dimension by .get_shape() or ._shape, and check the + # dimension by .with_rank() as follow. + # self.inputs.get_shape().with_rank(2) + # self.inputs.get_shape().with_rank(3) + + # Input dimension should be rank 5 [batch_size, n_steps(max), h, w, c] + try: + self.inputs.get_shape().with_rank(5) + except Exception: + raise Exception( + "RNN : Input dimension should be rank 5 : [batch_size, n_steps, input_x, " + "input_y, feature_map]" + ) + + fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] + + if fixed_batch_size.value: + batch_size = fixed_batch_size.value + logging.info(" RNN batch_size (concurrent processes): %d" % batch_size) + + else: + batch_size = array_ops.shape(self.inputs)[0] + logging.info(" non specified batch_size, uses a tensor instead.") + self.batch_size = batch_size + outputs = [] + self.cell = cell = cell_fn(shape=cell_shape, filter_size=filter_size, num_features=feature_map) + + if initial_state is None: + self.initial_state = cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype) + else: + self.initial_state = initial_state + + state = self.initial_state + + # with tf.variable_scope("model", reuse=None, initializer=initializer): + with tf.compat.v1.variable_scope(name, initializer=initializer) as vs: + for time_step in 
range(n_steps): + if time_step > 0: tf.compat.v1.get_variable_scope().reuse_variables() + (cell_output, state) = cell(self.inputs[:, time_step, :, :, :], state) + outputs.append(cell_output) + + # Retrieve just the RNN variables. + # rnn_variables = [v for v in tf.all_variables() if v.name.startswith(vs.name)] + rnn_variables = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.VARIABLES, scope=vs.name) + + logging.info(" n_params : %d" % (len(rnn_variables))) + + if return_last: + # 2D Tensor [batch_size, n_hidden] + self.outputs = outputs[-1] + else: + if return_seq_2d: + # PTB tutorial: stack dense layer after that, or compute the cost from the output + # 4D Tensor [n_example, h, w, c] + self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, cell_shape[0] * cell_shape[1] * feature_map]) + else: + # : stack more RNN layer after that + # 5D Tensor [n_example/n_steps, n_steps, h, w, c] + self.outputs = tf.reshape( + tf.concat(outputs, 1), [-1, n_steps, cell_shape[0], cell_shape[1], feature_map] + ) + + self.final_state = state + + self._add_layers(self.outputs) + self._add_params(rnn_variables) + +''' + + +# @tf.function +def retrieve_seq_length_op(data): + """An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features], + it can be used when the features of padding (on right hand side) are all zeros. + + Parameters + ----------- + data : tensor + [batch_size, n_step(max), n_features] with zero padding on right hand side. 
+ + Examples + ----------- + Single feature + + >>> data = [[[1],[2],[0],[0],[0]], + >>> [[1],[2],[3],[0],[0]], + >>> [[1],[2],[6],[1],[0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op(data) + [2 3 4] + + Multiple features + + >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], + >>> [[2,3],[2,4],[3,2],[0,0],[0,0]], + >>> [[3,3],[2,2],[5,3],[1,2],[0,0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op(data) + [4 3 4] + + References + ------------ + Borrow from `TFlearn `__. + + """ + with tf.name_scope('GetLength'): + used = tf.sign(tf.reduce_max(input_tensor=tf.abs(data), axis=2)) + length = tf.reduce_sum(input_tensor=used, axis=1) + + return tf.cast(length, tf.int32) + + +# @tf.function +def retrieve_seq_length_op2(data): + """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)], + it can be used when the features of padding (on right hand side) are all zeros. + + Parameters + ----------- + data : tensor + [batch_size, n_step(max)] with zero padding on right hand side. + + Examples + ----------- + >>> data = [[1,2,0,0,0], + >>> [1,2,3,0,0], + >>> [1,2,6,1,0]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op2(data) + tensor([2 3 4]) + + """ + return tf.reduce_sum(input_tensor=tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), axis=1) + + +# @tf.function +def retrieve_seq_length_op3(data, pad_val=0): + """An op to compute the length of a sequence, the data shape can be [batch_size, n_step(max)] or + [batch_size, n_step(max), n_features]. + + If the data has type of tf.string and pad_val is assigned as empty string (''), this op will compute the + length of the string sequence. + + Parameters + ----------- + data : tensor + [batch_size, n_step(max)] or [batch_size, n_step(max), n_features] with zero padding on the right hand side. + pad_val: + By default 0. 
If the data is tf.string, please assign this as empty string ('') + + Examples + ----------- + >>> data = [[[1],[2],[0],[0],[0]], + >>> [[1],[2],[3],[0],[0]], + >>> [[1],[2],[6],[1],[0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op3(data) + tensor([2, 3, 4]) + >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], + >>> [[2,3],[2,4],[3,2],[0,0],[0,0]], + >>> [[3,3],[2,2],[5,3],[1,2],[0,0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op3(data) + tensor([4, 3, 4]) + >>> data = [[1,2,0,0,0], + >>> [1,2,3,0,0], + >>> [1,2,6,1,0]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> length = tl.layers.retrieve_seq_length_op3(data) + tensor([2, 3, 4]) + >>> data = [['hello','world','','',''], + >>> ['hello','world','tensorlayer','',''], + >>> ['hello','world','tensorlayer','2.0','']] + >>> data = tf.convert_to_tensor(data, dtype=tf.string) + >>> length = tl.layers.retrieve_seq_length_op3(data, pad_val='') + tensor([2, 3, 4]) + + """ + data_shape_size = data.get_shape().ndims + if data_shape_size == 3: + return tf.reduce_sum( + input_tensor=tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), + axis=1 + ) + elif data_shape_size == 2: + return tf.reduce_sum(input_tensor=tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), axis=1) + elif data_shape_size == 1: + raise ValueError("retrieve_seq_length_op3: data has wrong shape! Shape got ", data.get_shape().as_list()) + else: + raise ValueError( + "retrieve_seq_length_op3: handling data with num of dims %s hasn't been implemented!" % (data_shape_size) + ) + + +def target_mask_op(data, pad_val=0): + """ Return the mask of the input sequence data based on the padding values. + + Parameters + ----------- + data : tf.Tensor + A tensor with 2 or 3 dimensions. + pad_val: int, float, string, etc + The value that represent padding. By default, 0. 
For tf.string, you may use empty string. + + Examples + ----------- + >>> data = [['hello', 'world', '', '', ''], + >>> ['hello', 'world', 'tensorlayer', '', ''], + >>> ['hello', 'world', 'tensorlayer', '2.0', '']] + >>> data = tf.convert_to_tensor(data, dtype=tf.string) + >>> mask = tl.layers.target_mask_op(data, pad_val='') + >>> print(mask) + tf.Tensor( + [[1 1 0 0 0] + [1 1 1 0 0] + [1 1 1 1 0]], shape=(3, 5), dtype=int32) + >>> data = [[[1], [0], [0], [0], [0]], + >>> [[1], [2], [3], [0], [0]], + >>> [[1], [2], [0], [1], [0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> mask = tl.layers.target_mask_op(data) + >>> print(mask) + tf.Tensor( + [[1 0 0 0 0] + [1 1 1 0 0] + [1 1 0 1 0]], shape=(3, 5), dtype=int32) + >>> data = [[[0,0],[2,2],[1,2],[1,2],[0,0]], + >>> [[2,3],[2,4],[3,2],[1,0],[0,0]], + >>> [[3,3],[0,1],[5,3],[1,2],[0,0]]] + >>> data = tf.convert_to_tensor(data, dtype=tf.float32) + >>> mask = tl.layers.target_mask_op(data) + >>> print(mask) + tf.Tensor( + [[0 1 1 1 0] + [1 1 1 1 0] + [1 1 1 1 0]], shape=(3, 5), dtype=int32) + """ + + if not isinstance(data, tf.Tensor): + raise AttributeError("target_mask_op: the type of input data should be tf.Tensor but got %s." % type(data)) + data_shape_size = data.get_shape().ndims + if data_shape_size == 3: + return tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32) + elif data_shape_size == 2: + return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32) + elif data_shape_size == 1: + raise ValueError( + "target_mask_op: data_shape %s is not supported. " + "The shape of data should have 2 or 3 dims." % (data.get_shape()) + ) + else: + raise ValueError( + "target_mask_op: handling data_shape %s hasn't been implemented! 
" + "The shape of data should have 2 or 3 dims" % (data.get_shape()) + ) diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py index 477847d..8ecdad0 100644 --- a/tensorlayer/layers/shape.py +++ b/tensorlayer/layers/shape.py @@ -202,11 +202,6 @@ class Shuffle(Module): if tl.BACKEND == 'tensorflow': in_shape = tl.get_tensor_shape(inputs) h, w, in_channel = in_shape[1:] - # if in_channel % self.group != 0: - # raise ValueError( - # "The in_channel must be a multiple of the number of groups. The in_channel got %d and the number of groups is %d." - # % (in_channel, self.group) - # ) reshape1 = tl.ops.Reshape([-1, h, w, in_channel // self.group, self.group]) temp = reshape1(inputs) temp = self.transpose(temp) diff --git a/tensorlayer/layers/stack.py b/tensorlayer/layers/stack.py index 570f805..6c84291 100644 --- a/tensorlayer/layers/stack.py +++ b/tensorlayer/layers/stack.py @@ -24,14 +24,13 @@ class Stack(Module): Examples --------- - >>> import tensorflow as tf >>> import tensorlayer as tl - >>> ni = tl.layers.Input([None, 784], name='input') + >>> ni = tl.layers.Input([10, 784], name='input') >>> net1 = tl.layers.Dense(10, name='dense1')(ni) >>> net2 = tl.layers.Dense(10, name='dense2')(ni) >>> net3 = tl.layers.Dense(10, name='dense3')(ni) >>> net = tl.layers.Stack(axis=1, name='stack')([net1, net2, net3]) - (?, 3, 10) + (10, 3, 10) """ @@ -82,9 +81,9 @@ class UnStack(Module): Examples -------- - >>> ni = Input([4, 10], name='input') - >>> nn = Dense(n_units=5)(ni) - >>> nn = UnStack(axis=1)(nn) # unstack in channel axis + >>> ni = tl.layers.Input([4, 10], name='input') + >>> nn = tl.layers.Dense(n_units=5)(ni) + >>> nn = tl.layers.UnStack(axis=1)(nn) # unstack in channel axis >>> len(nn) # 5 >>> nn[0].shape # (4,) diff --git a/tensorlayer/layers/utils.py b/tensorlayer/layers/utils.py index b231d1b..18888c2 100644 --- a/tensorlayer/layers/utils.py +++ b/tensorlayer/layers/utils.py @@ -433,8 +433,10 @@ def mean_var_with_update(update_moving_mean, 
update_moving_variance, mean, varia with tf.control_dependencies([update_moving_mean, update_moving_variance]): return tf.identity(mean), tf.identity(variance) + def w_fold(w, gama, var, epsilon): return tf.compat.v1.div(tf.multiply(gama, w), tf.sqrt(var + epsilon)) + def bias_fold(beta, gama, mean, var, epsilon): - return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon))) \ No newline at end of file + return tf.subtract(beta, tf.compat.v1.div(tf.multiply(gama, mean), tf.sqrt(var + epsilon))) diff --git a/tensorlayer/metric/__init__.py b/tensorlayer/metric/__init__.py index c11f832..75a03a3 100644 --- a/tensorlayer/metric/__init__.py +++ b/tensorlayer/metric/__init__.py @@ -7,8 +7,6 @@ if BACKEND == 'tensorflow': from .tensorflow_metric import * elif BACKEND == 'mindspore': from .mindspore_metric import * -elif BACKEND == 'dragon': - pass elif BACKEND == 'paddle': from .paddle_metric import * else: diff --git a/tensorlayer/metric/mindspore_metric.py b/tensorlayer/metric/mindspore_metric.py index bcc6499..710ed4e 100644 --- a/tensorlayer/metric/mindspore_metric.py +++ b/tensorlayer/metric/mindspore_metric.py @@ -2,7 +2,6 @@ # -*- coding: utf-8 -*- import mindspore.nn as nn -from mindspore.nn.metrics._evaluation import EvaluationBase from mindspore.nn.metrics.metric import Metric __all__ = [ 'Accuracy', diff --git a/tensorlayer/models/__init__.py b/tensorlayer/models/__init__.py index 2ebf9f0..2265e6e 100644 --- a/tensorlayer/models/__init__.py +++ b/tensorlayer/models/__init__.py @@ -1,12 +1,6 @@ #! 
/usr/bin/python # -*- coding: utf-8 -*- -# """A collections of pre-defined well known models.""" - -from .core import * -# from .resnet import ResNet50 -# from .mobilenetv1 import MobileNetV1 -# from .squeezenetv1 import SqueezeNetV1 -# from .vgg import * -# from .seq2seq import Seq2seq -# from .seq2seq_with_attention import Seq2seqLuongAttention +from .core import Model +from .core import WithLoss +from .core import TrainOneStep diff --git a/tensorlayer/models/core.py b/tensorlayer/models/core.py index e449af0..29f52b7 100644 --- a/tensorlayer/models/core.py +++ b/tensorlayer/models/core.py @@ -2,28 +2,23 @@ # -*- coding: utf-8 -*- from collections.abc import Iterable -from tensorlayer.files import utils -from tensorlayer import logging +from tensorlayer.layers.core.common import _save_weights, _load_weights import tensorlayer as tl from tensorlayer.layers.core import Module import numpy as np -import os import time if tl.BACKEND == 'tensorflow': import tensorflow as tf if tl.BACKEND == 'mindspore': - import mindspore as ms from mindspore.ops import composite from mindspore.ops import operations as P - from mindspore.ops import functional as F - # from mindspore.parallel._utils import (_get_device_num, _get_mirror_mean, _get_parallel_mode) - # from mindspore.train.parallel_utils import ParallelMode - from mindspore.nn.wrap import DistributedGradReducer from mindspore.common import ParameterTuple if tl.BACKEND == 'paddle': import paddle as pd +__all__ = ['Model', 'WithLoss', 'TrainOneStep'] + class Model: """ @@ -31,37 +26,54 @@ class Model: `Model` groups layers into an object with training and inference features. - Args: - network : The training or testing network. - loss_fn : Objective function, if loss_fn is None, the - network should contain the logic of loss and grads calculation, and the logic - of parallel if needed. Default: None. - optimizer : Optimizer for updating the weights. Default: None. 
- metrics : Dict or set of metrics to be evaluated by the model during + Parameters + ---------- + network : tensorlayer model + The training or testing network. + loss_fn : function + Objective function + optimizer : class + Optimizer for updating the weights + metrics : class + Dict or set of metrics to be evaluated by the model during + + Methods + --------- + trin() + Model training. + eval() + Model prediction. + save_weights() + Input file_path, save model weights into a file of given format. + Use load_weights() to restore. + load_weights() + Load model weights from a given file, which should be previously saved by save_weights(). + + Examples + -------- + >>> import tensorlayer as tl + >>> class Net(Module): + >>> def __init__(self): + >>> super(Net, self).__init__() + >>> self.conv = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), in_channels=5, name='conv2d') + >>> self.bn = tl.layers.BatchNorm2d(num_features=32, act=tl.ReLU) + >>> self.flatten = tl.layers.Flatten() + >>> self.fc = tl.layers.Dense(n_units=12, in_channels=32*224*224) # padding=0 + >>> + >>> def construct(self, x): + >>> x = self.conv(x) + >>> x = self.bn(x) + >>> x = self.flatten(x) + >>> out = self.fc(x) + >>> return out + >>> + >>> net = Net() + >>> loss = tl.cost.softmax_cross_entropy_with_logits + >>> optim = tl.optimizers.Momentum(params=net.trainable_weights, learning_rate=0.1, momentum=0.9) + >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) + >>> dataset = get_dataset() + >>> model.train(2, dataset) - Examples: - >>> import tensorlayer as tl - >>> class Net(Module): - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self.conv = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), in_channels=5, name='conv2d') - >>> self.bn = tl.layers.BatchNorm2d(num_features=32, act=tl.ReLU) - >>> self.flatten = tl.layers.Flatten() - >>> self.fc = tl.layers.Dense(n_units=12, in_channels=32*224*224) # padding=0 - >>> - >>> def 
construct(self, x): - >>> x = self.conv(x) - >>> x = self.bn(x) - >>> x = self.flatten(x) - >>> out = self.fc(x) - >>> return out - >>> - >>> net = Net() - >>> loss = tl.cost.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True) - >>> optim = tl.layers.Momentum(params=net.trainable_weights, learning_rate=0.1, momentum=0.9) - >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) - >>> dataset = get_dataset() - >>> model.train(2, dataset) """ def __init__(self, network, loss_fn=None, optimizer=None, metrics=None, **kwargs): @@ -96,13 +108,18 @@ class Model: ) def eval(self, test_dataset): - self.network.eval() + self.network.set_eval() test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_dataset: _logits = self.network(X_batch) test_loss += self.loss_fn(_logits, y_batch) if self.metrics: - test_acc += self.metrics(_logits, y_batch) + try: + test_acc += self.metrics(_logits, y_batch) + except: + self.metrics.update(_logits, y_batch) + test_acc += self.metrics.result() + self.metrics.reset() else: test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_iter += 1 @@ -134,43 +151,20 @@ class Model: -------- 1) Save model weights in hdf5 format by default. >>> net = vgg16() - >>> net.save_weights('./model.h5') + >>> optimizer = tl.optimizers.Adam(learning_rate=0.001) + >>> metric = tl.metric.Accuracy() + >>> model = tl.models.Model(network=net, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric) + >>> model.save_weights('./model.h5') ... 
- >>> net.load_weights('./model.h5') + >>> model.load_weights('./model.h5') 2) Save model weights in npz/npz_dict format - >>> net = vgg16() - >>> net.save_weights('./model.npz') - >>> net.save_weights('./model.npz', format='npz_dict') + >>> model.save_weights('./model.npz') + >>> model.save_weights('./model.npz', format='npz_dict') """ - # self.all_weights = self.network.all_weights - if self.all_weights is None or len(self.all_weights) == 0: - logging.warning("Model contains no weights or layers haven't been built, nothing will be saved") - return - - if format is None: - postfix = file_path.split('.')[-1] - if postfix in ['h5', 'hdf5', 'npz', 'ckpt']: - format = postfix - else: - format = 'hdf5' - - if format == 'hdf5' or format == 'h5': - utils.save_weights_to_hdf5(file_path, self) - elif format == 'npz': - utils.save_npz(self.all_weights, file_path) - elif format == 'npz_dict': - utils.save_npz_dict(self.all_weights, file_path) - elif format == 'ckpt': - # TODO: enable this when tf save ckpt is enabled - raise NotImplementedError("ckpt load/save is not supported now.") - else: - raise ValueError( - "Save format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'." - "Other format is not supported now." - ) + _save_weights(net=self, file_path=file_path, format=format) def load_weights(self, file_path, format=None, in_order=True, skip=False): """Load model weights from a given file, which should be previously saved by self.save_weights(). @@ -201,15 +195,18 @@ class Model: Examples -------- 1) load model from a hdf5 file. 
- >>> net = tl.models.vgg16() - >>> net.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch - >>> net.load_weights('./model_eager.h5') # load sequentially + >>> net = vgg16() + >>> optimizer = tl.optimizers.Adam(learning_rate=0.001) + >>> metric = tl.metric.Accuracy() + >>> model = tl.models.Model(network=net, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer, metrics=metric) + >>> model.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch + >>> model.load_weights('./model_eager.h5') # load sequentially 2) load model from a npz file - >>> net.load_weights('./model.npz') + >>> model.load_weights('./model.npz') - 2) load model from a npz file, which is saved as npz_dict previously - >>> net.load_weights('./model.npz', format='npz_dict') + 3) load model from a npz file, which is saved as npz_dict previously + >>> model.load_weights('./model.npz', format='npz_dict') Notes ------- @@ -219,31 +216,8 @@ class Model: 'in_order' argument will be ignored. """ - if not os.path.exists(file_path): - raise FileNotFoundError("file {} doesn't exist.".format(file_path)) - - if format is None: - format = file_path.split('.')[-1] - if format == 'hdf5' or format == 'h5': - if skip ==True or in_order == False: - # load by weights name - utils.load_hdf5_to_weights(file_path, self, skip) - else: - # load in order - utils.load_hdf5_to_weights_in_order(file_path, self) - elif format == 'npz': - utils.load_and_assign_npz(file_path, self) - elif format == 'npz_dict': - utils.load_and_assign_npz_dict(file_path, self, skip) - elif format == 'ckpt': - # TODO: enable this when tf save ckpt is enabled - raise NotImplementedError("ckpt load/save is not supported now.") - else: - raise ValueError( - "File format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. " - "Other format is not supported now." 
- ) + _load_weights(net=self, file_path=file_path, format=format, in_order=in_order, skip=skip) def tf_train( self, n_epoch, train_dataset, network, loss_fn, train_weights, optimizer, metrics, print_train_batch, @@ -287,7 +261,7 @@ class Model: if test_dataset: # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - network.eval() + network.set_eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_dataset: _logits = network(X_batch) # is_train=False, disable dropout @@ -307,7 +281,7 @@ class Model: print_freq, test_dataset ): net_with_criterion = WithLoss(network, loss_fn) - train_network = GradWrap(net_with_criterion) + train_network = GradWrap(net_with_criterion, network.trainable_weights) train_network.set_train() for epoch in range(n_epoch): start_time = time.time() @@ -340,7 +314,7 @@ class Model: if test_dataset: # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - network.eval() + network.set_eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_dataset: _logits = network(X_batch) @@ -394,7 +368,7 @@ class Model: if test_dataset: # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: - network.eval() + network.set_eval() val_loss, val_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_dataset: _logits = network(X_batch) # is_train=False, disable dropout @@ -411,13 +385,38 @@ class Model: class WithLoss(Module): + """ + High-Level API for Training or Testing. + + Wraps the network with loss function. This Module accepts data and label as inputs and + the computed loss will be returned. + + Parameters + ---------- + backbone : tensorlayer model + The tensorlayer network. + loss_fn : function + Objective function + + Methods + --------- + forward() + Model inference. 
+ + Examples: + >>> import tensorlayer as tl + >>> net = vgg16() + >>> loss_fn = tl.cost.softmax_cross_entropy_with_logits + >>> net_with_loss = tl.models.WithLoss(net, loss_fn) + + """ def __init__(self, backbone, loss_fn): super(WithLoss, self).__init__() self._backbone = backbone self._loss_fn = loss_fn - def construct(self, data, label): + def forward(self, data, label): out = self._backbone(data) return self._loss_fn(out, label) @@ -429,10 +428,100 @@ class WithLoss(Module): class GradWrap(Module): """ GradWrap definition """ - def __init__(self, network): + def __init__(self, network, trainable_weights): super(GradWrap, self).__init__(auto_prefix=False) self.network = network - self.weights = ParameterTuple(network.trainable_weights) + self.weights = ParameterTuple(trainable_weights) def forward(self, x, label): return composite.GradOperation(get_by_list=True)(self.network, self.weights)(x, label) + + +class TrainOneStepWithTF(object): + + def __init__(self, net_with_loss, optimizer, train_weights): + self.net_with_loss = net_with_loss + self.optimzer = optimizer + self.train_weights = train_weights + + def __call__(self, data, label): + with tf.GradientTape() as tape: + loss = self.net_with_loss(data, label) + grad = tape.gradient(loss, self.train_weights) + self.optimzer.apply_gradients(zip(grad, self.train_weights)) + return loss + + +class TrainOneStepWithMS(object): + + def __init__(self, net_with_loss, optimizer, train_weights): + self.net_with_loss = net_with_loss + self.optimizer = optimizer + self.train_weights = train_weights + self.net_with_loss = net_with_loss + self.train_network = GradWrap(net_with_loss, train_weights) + + def __call__(self, data, label): + loss = self.net_with_loss(data, label) + grads = self.train_network(data, label) + self.optimizer.apply_gradients(zip(grads, self.train_weights)) + loss = loss.asnumpy() + return loss + + +class TrainOneStepWithPD(object): + + def __init__(self, net_with_loss, optimizer, train_weights): + 
self.net_with_loss = net_with_loss + self.optimizer = optimizer + self.train_weights = train_weights + + def __call__(self, data, label): + loss = self.net_with_loss(data, label) + params_grads = self.optimizer.gradient(loss, self.train_weights) + self.optimizer.apply_gradients(params_grads) + return loss.numpy() + + +class TrainOneStep(object): + """ + High-Level API for Training One Step. + + Wraps the network with an optimizer. It can be trained in one step using the optimizer to get the loss. + + Parameters + ---------- + net_with_loss : tensorlayer WithLoss + The training or testing network. + optimizer : class + Optimizer for updating the weights + train_weights : class + Dict or set of metrics to be evaluated by the model during + + Examples + -------- + >>> import tensorlayer as tl + >>> net = vgg16() + >>> train_weights = net.trainable_weights + >>> loss_fn = tl.cost.softmax_cross_entropy_with_logits + >>> optimizer = tl.optimizers.Adam(learning_rate=1e-3) + >>> net_with_loss = tl.models.WithLoss(net, loss_fn) + >>> train_one_step = tl.models.TrainOneStep(net_with_loss, optimizer, train_weights) + >>> inputs, labels = tl.layers.Input((128, 784), dtype=tl.float32), tl.layers.Input((128, 1), dtype=tl.int32) + >>> train_one_step(inputs, labels) + + """ + + def __init__(self, net_with_loss, optimizer, train_weights): + if tl.BACKEND == 'tensorflow': + self.net_with_train = TrainOneStepWithTF(net_with_loss, optimizer, train_weights) + elif tl.BACKEND == 'mindspore': + self.net_with_train = TrainOneStepWithMS(net_with_loss, optimizer, train_weights) + elif tl.BACKEND == 'paddle': + self.net_with_train = TrainOneStepWithPD(net_with_loss, optimizer, train_weights) + else: + raise NotImplementedError("This backend is not supported") + + def __call__(self, data, label): + loss = self.net_with_train(data, label) + return loss diff --git a/tensorlayer/models/models/resnet50_weights_tf_dim_ordering_tf_kernels.h5 
b/tensorlayer/models/models/resnet50_weights_tf_dim_ordering_tf_kernels.h5 deleted file mode 100644 index 904349fea3fbc3f8267a7cc70c5b7d7f464781e5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24576 zcmeFXd0bD=|2CfXy`+fJqD4Zx)a%TgRwN{mH5C=Il~5wIX_Y8Nq)3ZaQd(Z~oRh5( zvdg|^sif@L?^L|={@maD_s8#kJbuglJ%5}z&-1)y=9-ynX3or+SO@#@GLl0i1%6%P z;sU|~^4*WWZ$EplDql^}U+;d~zx|%Q5B++>nBMS^V9!Ipt`vXylzZRH_olBQ)brwU zPtF1}T*i$P=qL0i{eS19vHo~>itBpQakkGE`So~Qxc9}n9{=C$dwu^M{C~a%9LCu= zcNapcrzlmuy3w0i)&1hX(%E+H9%Y91MC;Px|6GrUhWFrh_u_sB{d@KLZ(U0Z?3Vl` zy?z4H0^(ijUcMmMC%|Lnf)zfiyQ2>1N>1*Vzh6(frM(IM4t-bpuU+F|-1}Bwy-$Fj z{{nAs-<69^ygiJ){8k43@d*elTu7AzJUwY`1-BvPGH4?b^d;S%X^~=1o*5r zS>UlSV8KeSB_<2H;`X%!`ouNq%KR_z!oR=^1^R+b{|)x~D{dFrz8QJ_W&SG33ZH<* zKL3=r4-K>4lo$TRxsTPC#+ZMDyE*>>|IOL_-(as_B>#ZBNcOP$RT14+A68?#Ui_*Y zufJIJvHG&I_&2zl)gSQRtStWx?q>A|{D;-A8t=aPurfC4W@Y+U`}Vc^vNG}&l+qbr5K zz}=kxME$!ajl0zF7x-6A{*&^bn*1f2|6tW6j$d*A)O$CpKGt8Xy43I&xSQ1<@V{7f zso^hhH>*G3KdgSK^FLU1iQ`vX@4l+*YxQMi(kq5P;BHoZtiQm$V)*0kX4S{)?F+wj z|8G_%J=!q)OLcv%zcT7+?tk3BSoO930{1lcKkjbT{V}^$_s8l{-5>KG)&1&z|G~LO z8)ko`^Q(RPT7S{#5yL<3Zq9$A{@uPNJ!1F={HrGWTD>*-tDpY|s~&Bb{ZT_Vt3K9W zta`-o54fAvAMoF-Onb!e54fAvAMhX5{Tf03&C0Y#8)kph(9Nok)t8lNuNeNgyIJ+I z{sQ-k;g7qURUhm3yzr|re_egXI@4Zl`1I`yW__%`z`f1=$KB1UkM$S0x4HkgyIJ+I zezW>LG4y5C+xmY zzEp69&L>I({s96^|Ah9)gy*~z}>9= zfd9p+M-2afyIK7K|I~#zL_j>GiP5hr3{$7vG?k&D}Z#es3 zvg=v>2naa;Z>py0PE+lFX}GI1bzhfDd&1xO#GL4{51r}>jRbo_|L3yZ55I95V!u{r zU0WXCd)|BY;yPq|-v7p3*6ZmQ9r``t_Fmj^vmt+_XJp@FJCE-PyPf|pzWrX;D)$s$ zz@oR^qy-$hYWLT=Rz^UvYh4@U$m^^s@{J0pGUpp{qG+H(p_H)zvrJ{`>Ow*e}3(&N^~XudtbG8Iu`gH z{^8m4@DIU%{nz--ub+Tu_y38W+pl@gq1)9@xTi#7y};jL?{wES`}JJE-~SH(?HB4Q z!>^S3Yp8dZOF%$FV34@Lpv}GUMY?9wUw%PB!R`!ymHF3$P`}wKy^!SIuscxboqc~{ z`pXo|66odI7xRDTx##;zus4^klm`E=Q~NEG*(LvL-2Z7_*?<@5TdXY@Cf4yTf7ph^ zp&Hd(*+;YducM)*oy^>!tI_N0M$F6hCOFVO4fRtRO3n?>hn6|j>{v?`hbORjKm!qTsjxc$J8?^6v zL1wDu0M3{S6SaJa=0zFgrn(DXb2dZG7i6&8P6p+f#Pgz!qj@FUGa0s?qQznTF$qm2 
z3NMwQ!r2gn>}Jz1nq~aMj>nn$y-qmi(?TZFM;1vx-j1Ero?_#ib?C+LRwnQA60kA$ zC;qW=0IF$BigFbA7@nn5(?7wX>v=G1haFB9GKZ%JUlN2?i9BRC3 zhfL@GAY5lUZ(@QL_Euq-D7|vx@OmJ>$}pMS$k{|5L-LlvB8ziH)+PfXXxFl+l1#KhB9J5k|7s)XiP#e3bYZ2eunD!ZNEJJ7tJio*Zk)w z^6*|Iukx2&a--hz8?*^MkN+9}7otZL97d6LwqkNlKl=VrS zbPk(9Z@n^N&R!l4N&Ff*2Ngo%&dVrZLj_v#sSFlM1c2$wM96(C1zERkNwC=}s=93( z>|Jn{G@aC<``E3tu)-Al;+OEB`uY*?FLr3`qVs6v*bs8eV;$V*Qt8%h1YOG~mYbR< z!^9`2c@Yib^khF1o`0nduKLo#w=QWu}v2(6}*k2$Fb;Dh943%x`Un>mBS+6NVwE-7!6Y?fQ_$O zVUyw;5|F0@Te(yezpj}%&|Jkx2W&(6$^&U)h$-Cm%3w}URY8{q?PPcmK=uY2u>cZFKvx!=a&=;h`_r7{|cjB0`SJYnQ+djnZ0%F}n6o6yvyulZ@a z-;t}H(x4k)g}!H}p+X@Sd?_`VSv%}8vyE4bFN=obXx9gfO#_RTy>11Ykio>@!FyyT zxEalfjzs+*=+mJOKSRQrRhd5S<()YRu-yXfXwX1P3-5sX z1#kQ=!W=7`t|3+%jj>Y63$pp806%VKS@o-Ba} zRG^xRh9H?)^7N&T5y+P{qRr_tT>KUlG-Xd3Gjpjoiht#Vva1t7UL+k^K2@XR;^G;T zPgsKz~G#c1b{Jd};~F}3``Sj;PfCb8E{=)^{{I+l;T&CkN%8PX8N+yZNh%kaUV z8J%peVFrhdphp+)hSoDq5IX-I891PTe^dQERGc3U_CmJ!;VVCK2!8N32oFRnri2iu za~F6w)0NSC9d#!4Llm-=i(#ZBwxOzVhD2q+0Up%vMTWCniO`Q>XvU=&{#XxhxEJ(| z88$T+Hrcn}r3(=mAMG+kd=>49rWO_>&Z;FZn z!Pf&o$uk}dT8qIkNE#>KzfFWx6G5r96OB6=3lbNa(F%7JoOP|9zR4O1S)2pkJl6q+ z3D3ipsa1I2lwEig&jlqcE<@#3%?ycFL094epk>4aD)@6Sx}*7t=aQvJP8nRO-e!H& z@{EKcK6HKxekL{%?cK>EHDZVG8|Qf37@5ITCLKWcR0XKpKxdR3mrlkG+ynIo)N#CY zG#zp<`kl*mbr)J}4Pn{W51c9{25Xb)k#`xiv%x=PzB4=eGExAqh@6LdF?3vy)8f^6* zwe7EFP7W6*msk&?G)RSVX===fLV0MBw?pG4$2UHka zD&dZ|z}FG?QGJIb9d|_q+!J3S$xnIcXo@jj<`IvSu2^CNfrT)UEGGd9XGm4yBzjRg z3<;DDW!j(_uFu|#o+vcJPw}BJHM^6nh#QY@7i=KZNr`(uNt|B0t!(+)(VcGm5e#h` z((qT_LG12w11*+TWClDlhXj3bbjM;Ob4y?x9Tuxh>tjtJX`=vnDD{asS*=RLW45B4 zIj(qCtR%Ww=gv1QE5kyr4p{YqN%cUcjnPtj0fE}hs4Ya5E?7RDrg%r84Qnn!%9a?I zT$qhdo_c}8RLA2l0`E}efnx978AxGcRDY5xc!-*<v2p+3={o+3SRaA!?N~l>Xr9^HZmLuus#d56SkmQhf*Bwn8UcuSpvFqcu3K{4h5}Z zV0Dlv-F0&+H$(p@O|ZW~Hk&%56%|6r*|ZMCljoD0K7v$<6-6H5>dZ2OrQFsFA~Z$H z2nE=0Lu=3duzWgS3}4vgNYDS=OpSIMq39z6>4}Z?5O9{k4Hq6k$ObJ;mc2(roG=-Y zL$MP-4(3KWK+I4*s40E}THG7NJs+?q+zx9hp+@`XfvMgKf*4_lAeOOe=hPAz%i)QTkf1wdKj8usOuc>2vK 
zlous@24?(-2A)V5p1d%Nt`Q4G_2pZL%o$(C<%KxEOh=2lkME?rVpXWZDP_y14^I3f znIF*MVn@?-=TOye(qKF83Uhs|1xybgz(#&CfPJ&OzUP%~u=bh&7(Bm&Iel&lO^LaS z!d(0yB~1n-eC^PDIaBO@Zycok)U-S6OH zK=dmmFY>MFg>g=F;HqQfLgx(Jp*RY%HxJ_k?cJfHT!UOr(xQ)wMnOx>QM9UM0ABQX z3*-9;s%19~!mBz&P_BXmewaUz@|kSL*6}Q}*gTosRrF?teq96(du!-v8H{s7&hf;D zV_wCmblg}y1AFCd=Ep~7pp7ORys-R$0vfN;+1ggvBs?3;d^G5;$~uJeJMrcI`%uAD z1saz#8r0S%VoiqzlCV}3c=z`q;h%ntPzgdhQ>EF46H@pE-cB~hJfQO5J`=wG8_cPV zCu_E!V#3nw$bgRqP_#6kh$_lLwSF4e?HgZxKXekwylG5RX1)Vk_rvsNnFz94?1XRM zHiZQd)zl%-lq#I9AX;^YpzUW3x+X4$4sY7VcT2kfS5<1zLhDQTMvgixUN45_wOqLy zrEN%Z&0g4UKOUV=&0%WX#Bf3SK02~Rjz<0H&x{F3=6~s2#+81$17c(N%-ftol;1Rq zS-$)_DowAYW`jI&OprRQdE-g_9cH7$@>{UM+d%sC&@<|qF#72|quJ4qF z4Yxq%{uL&6@(g@1-U^MfyA7#3bs7!COz$9EB&hwIctVe$k)reXdPv?itj9vf)T zf&>{_Y~csjM`fWUKLgRT4hc?XkR2B78WUFFc(^UN0k*U^!uhG9kiA_TwVl65J{76Z zcK095GXIIpLOqsFJbM?C_Qu2Pt?F>zTpuz8w;&_EA5>Yu9=|yk#&^k|#8m7k#d%?F zcw$i;xmtD*=~V86!~9HYKW!Q!D~I4OLTX?T6GulpsY9_sAr_aXAo=6h(8PU{sA|7PVmNgYHVo%++36{uJB@|O zXU$O4i~D3tvnMW)N}{I4Eik%f7_a60HD0vYCT{z9IeK$@Saqd)8Ct;U!-?7mYHLp! 
z^YRIJ>N^gyaSZMBD??{PMd;WF6RL#wLz?GSG(RW-jVF)rg8d^Q;!+mhTs4n0h8{q} z@2$rxJXVo$I+K~zQBh#e9cKI{FT^Paq(GJ|;bpi*kUeYe^R@AMaxZK=EnL3=ec@tY zrQv6IJ8B;yx0W~b^tEz+``%p;ULi|L&BDQdtPJ*gu?uC-v4GGS z7U=7{VbH%xiKWHLbZEvGr0bRf6Yi>`McZ9*yzf-vruTz9b@D;0+$+(|kQ<=osKj*c zTnsyw1tZtgsA9iywXuO zz@z0o^NAUX_B{+n6Dl*&uoDYVd}J%iNK}F!i{4Y{lLo`B@bfZW+<|*iI|Mug8c41 zK$8~o(Zui9VVQ6aiJDMF!su1{W|1VMXoWKD(f1@*ED~MSut1Mr3ZlqODj>_mk?2bX z(D`aM*h+OkBGEzXBqo5-LmO1@C4fBNo5M7NEC>`5hdnF55u5odkb&`b^2tt(K3q4C zNj$9q*rOQwu`$e*EE~RlRT6V~-8ANLSP^MH{KGOhA&O?bI9n|uC{Hix855Md69$P$ zqc7{e!kB=ojE4GMH0!7Z&#H42a+e8(gX+6UVR;t*v0Mj1gf+0T@gU~qORUSv;6*yzY=ZZY9i7 z6h{XJKSxfwg*?@^w~(i80XmhcbV!x~7-<|tmy@G0PkTQ9BGX2L<4c%wQ9ctH5{Gmj z9AtFwgff}F33zwma1fYAm^b1B;PX^}X8PD%(kWHKpFHpq*)wZ2N-I;No|jiK$u>Jk z#d%ROTXPtWoWZBqNd>Bn+{xH8=!?&4h@mt06qhBNUQWF zQnbI2Y(BD!$BSPGpB*@KY>E+%u;%ggU>IGV+X%HcJ@AtH`Se1DD@vC*3i?|(^zfDg z3>dwQoX4MF*s}THEi(r#o1R5NC-YHowign+e;&PhrwV=#RLIF93fpEUVeL~F(diQ- znFGR;sdLUwV!2~1s$Dx0t9@yM%ISRYNVmcTNo&!|Ne@V}Q$H*%5sRBdD!|C>4mJLn z&6I9i431G!L-3>(hj{+6 zUx|OiUa~WT51WO{QCx_V<;+vwxNe0MJu9RLGnEwJw#q8lj@Q!}KQ=R8r|hRAJfaI(;{^+I0u=!YR z*d(b$xhv6lch}g|tR_lp1c#s!wSnv{8#yeLZ_iDB=!;`~V<3H+6mm!X_*VkNvFc)J zc8W$A@;v{Q8Io6k9|$Inogy4WpMU?toZYSs-+!z{8t=21f)n}VSotS%D$^d>xhn9& z#wOC5Ve=txj3Qi6ozC3Kc4w4o7tmcVRG~u*(QA9n==G_2jr_d7fPu9y zNN5+KaNEyx+GaC4p=+OnYu-;EyF22P7st`CWCJ|#;z?q3`3TH1vBFKm&%yDfyTL{I zFq$2BAFElZ02RATPvj({m(JI6Myo z1D64j7b0FZPP9-d18=mkq?I|%jDmU$e!X3UjMm#k4Kv>(*{*%V&n*t{I)n$|xi(-h z_5;nER*R-79AZ|FEds?^72wTHAQ7ftm?q8jAnbOErtBM!%@b6(NxG?2NUOopL%SGX zxAB7?BLeZ!+ws^WN{X)h7KZOUtpa6zDfBS7h5t1ABAv8j3;yVFj2L^j^0o@)BJH5L zAT2(aWt6Aj;%UK!mp z0Z*H@mp)t~$)%+UVBhF#*hE+qd-;iSvizM?b-_%!-)9K(?&k`k8NUE~Bw4ba_rIbn zy`Maz^V{(y&A~8g@CZ)pt37I!NQEm#8O$gL9dzjMM5y(hLCe`XMszX5#N7D;HW5d0 zLAwEsvdYK8Gp8_HhqlqLiY`d6bRCt79?JRZY=qW41-8jV8sDLl@wUkcj8@qXdf%@T zO>)honb(%V;A$2;J=Sox7qakP;Zk(bDixKhhoaECCD{C1I~Fjy3YreP;jMN65*O!j 
zd+w^hhXoBJJxGU}9&V4#-9;H0Vt{UG?ZpkY*O9xPJZ6hR=q%|&)O%$jZ@$W2JR?&~!LQ~nstX>oxe51t}ATaaxY``olzWYxgW+Gdeex?YuK;}B@6MXoDh_C!h?>U*#tNIAJVzU zzgZ4W5Mud`2XJEW4RkHjjP7qTL;ZqA;M`!UB)E~BSiC9&cHBf4rt8yYL*O7}cE3VZAYsfey1_y!KZZX=x0 z`r9(dleCs*N;e z^1k?E-BT$vFvJGOje7|9D}ONYlG|w9&l~jh?JO$4bTi$tcQAUjC=3s%h``<#+*sXG zW8AQ21qe?lgmcx?uy}4XHM#Eq#|Mt$&T7$2@1|*4VMmKQNu?lx7z_{Oc$f6<)NId?Fth5 z;uNnba|kE>WjtOfA;$Up7f~x^e|BMx3VY4j6|Y}96QZ%M;^qnC(C&gc^louAJ>l5^UT&gjXxcKeD^!|m3eeyZm$lQkH~i@H z_H(?KG=eI|$gz*#?SZY!3}AF^I)AhFRC+FVD!cROVVpbnD{5}cBkdDkz#C0>tf*B^ zWn+&b>&N4r593i=MZw3>BX%xl*OrP7@6PM3CXDv+?`4?okKku+^U#!wqp{|^ z42Ztp4oW8%Y{?Aqwv?lZ9t7b~q=gfIc@FM-PuWg(tn#;bJzp(+zTWX{+8B zMrN4;mt9;4EsG0j_RufLl9h+n%D0$2aDb<#Qk>mPe`stsddIcO#gN(6hjATp%H^QV#$8{nJOQg#-@x%rBXHt?-Plm}1rC$nfzQf173Zwj_604!o?e( zh}X7cBqota3pY*0Sp&yX)jTQAbG8IkUowW?ANZ71316f-=aRs6QVyHsaGaZ3|NlF@fVG+1Yk< zQsW^8NeiKbleZW%by*x#3=F(p$cXBNKvjebh^~zx3<)3u^Q36obWhZ}r=3~7N|`?S z8b_+9chGOwQ{i^4B=sDaiB4EMa<@koV9S=rjM3?pRN$xt=e&SW8x>=U(>B1PyBCQ^ zL_1x-`YrRZbsL_wqLGRS%mmdB)?Iy16!JUf;_8euNKkDIx7=I@m%|gHx~h}tE!u$( zO<6{l-WW)d;*6QVz{PMZPLsX)EE}7MFx+WVM?7VmGma5Y$KeB~;5|x@p=9+v8f;ty zm>s}=&30rjeA+_I!uF9D+ImQ#zCZiO(4MUpdyb~6mNTm*^N`i3%`pC1*P6I~6s%L@ zz{ZyH5~i49+0mWEBc+7C>H5zU-0=>yUS&(aE<$8HXX$xVxTXfwx(km66)OZOchR zsV`?F}S`6!SxWcKxilzlhsv2Fm9F< z7uD!UrM9=Bp%*Su;}5c&#>X{CZ;$~e|6vU@Cz#W+Tl2{JWxLot(p~GFtcjp-$%W2A zad?hFG@Vnz;CYTCX~&7-beo77H`BJ2M8H!l%NOS=K6f$~hB)B2ON3$NIB|MIIth`( z8(>I~9*Essj_evN>5Zq-?Alr{JZIcMJh&)^CWIYBp31HC^g|QkR%S=Z2^~6FOa?E# z(m7nQZMVx$m8wK}o)M?aCd}xj~9VZ?OKQBo!_D1IDU1AA}-Vl!W*G#6V2`3nf z9UIXGUq0NtcnH6@Iz}2Z9>I&xLty{U6Xe#CNZ#Okn#@AQ{^**|e~}3N0{x%S^~~gt;f4BUk11PHQ zI`ItHvsQB4LH)0=-nbv&xvFS*wFVx#Uz<&I?vIrsnwVE2tFbT}0n#>dtoyQEuxL&? 
z}@`tZnFb_uPP-g-6Xgxg6EkD-`|7QfCzLy zWh=e+<2&KbD*^EUpt!e$+IBKqY4GE;YX8dbr(Yg(qn4kQU9xENsekjbqv(&!PQEPSRP^Cv; zvOp9@j`K$TH|p`R-Jg)r(x?1$wNr5G_x-52^%PzloePcp4Adr*#h;R(Ozjlq;FYZ# zQCjht5sQ{Ud$Ky|wZZf8`ygAi;rtOgC`l16zUK2K#8aT*$x?PzQX8|+YckZynbFO* z)3J`gbP}i(iY59dgH-lac=hBAI(lmsyff=hk1Y5?E0>DWkalxM&BzeO>3YB=t_||# z9z*u&aL`_SoH0;tAbV|f>2UcKbfLp_UWl0yZn``kDdYu0y}Bg~pBD$Vg^s8}VFwc- zT>uZC?IkiI?Qquj3KH>;1Jm0raCCSSWBu(W@_ic1)BCJJ3ZA!+^aC$RZQua9Z{qP8%ON^P zm@D|!#@zTCj!T9uWE?h)CJwn*k$##fF7LOWi0@lNHqA@H8U5yi*RY$&OX5CJcfN~) z&ir76R`^h9spWjp#Dl2i>Q3gmKr&u%sDzp)6~NHa&DCel8_}orV(5mb2d=|1*!uE8 zNPPK(n3@?7@0lz$$}d3MnqqPMk8*ykZZI^4ZbPrHxq-b?2N{0(dDpp-CQ#WYk7Q>Z zMmATRk>&f3%q_AXBCm;4brUK2@w6cEofiOeQU#IsT?JZWo{dzjb@9Efc2}Rf0~9W| zG7+)!QP8e;RWg3XWZLHSbV&AnM(kTZ6reQ>KOCclttE4*^W0&OKUDz>4ahhqDLeue z)GP6d1z+*T$g44xc4LWs`B$`S=r^9V_YE{`kN_kl*P>EUWnS*K9A>w@IXPpjk9CZ? z&KHk6O8Z~Y#PpRGU9E3NoEFA`Ro*tz`P3ZWPnw6+2k61Wss`jRz?1B`{sIE-TT*cw zOT2U50ji~&2?7Tw({w2d%x4I}&$S21^GRRGgj-^~`e822E?;GshQ)9~Of;OykD}A& zJHqgg7!o-n37QJlcg;)sFg0Wpd@@p`wO3C=%=1xX(y2pOb7?aZxL=KWPj|sC&+kI^ z%%;Q?c^|;#yCSkvM_t03WAF#4cmBw2QVk%;G zF|`V|kUlyer}}I|4wZ{&!<}?`G%p&x+c$#Fm9$|lKGOn`XIt@L)hZ}+SdT>Vk79#! 
zvyfwLG0!=}g|4<^)qSv&wPApT?)c<}Hd{XT>g~r{)ZD ze_sd7Tb|JBTn!qXiC~(hE-Pw(fZVI3)Gq%5j(ReaHV8;j1xIJp{CON}asCOTK5ZuV z^UQO6`s@+BKum;Oc@a%)vtQ#A-->bRvol~|8cAL~I)#O`CLurJ8I0yDjxn!k1%3U) zh&!u_Hr_M@(V&O4ajh`U4L^g7Vy&oUz#){DJ`N;L31h1gEfihSpUw9RAyTD7sYRLr zmla^dS6HCLc?HhKog+M{cL>YeiM60V?n%;;aj|%8un5Q&#N(40=c(}m2YPu_4Z1q= zB%0rH2et1h#%`YFG$`jP4EbCN{0oaw?&K6IE8lf~dQU6w-qGU}Cr`xhoK%<>&S$Zw zjsqS&BAI&Iq~M};YIJht29l(w{EeJNI}=@CTU!#dXxdhy)FI4T`NB4@&fI(joT)K9I zW@#ExX@w}NbZRG_B^W{auQJCL9y9697dz;IYtxbHp2g;S4x-!acIe=} z=G`O~^p>F*dt>niQapMKyg2Vd^ICZ{Y-kEx?es(a6CzNtjtJ)wa1nJ}iKH>XXQ;!X zO(-FEB2GS$#_r0lpl5PUfp4Q9LOC?n}5n-={whP;`?BHRB$YwYPHZn2Yn#Bl6{ci#fFTZ zjyHQ0pp)B`SbkeJ9{If$HU;lfo< zdU?Qg-0|ZJK2m86&gPHsDTQOCMADl5sMAR*2Y$up8ceuja!<*U{L8c=^9B;Ttwd{` z9jW)8YAXC%ibknV;HuqEG3HS!tlp0p>U*1E(@d4wtoh{#WlW=$YvkBWldi4YbJ@`L zpome-cnh-**sxpQr{a!7;Y{Z93i#NnLs3Z(Tjh0{n$!kyA$FZ~C|?9CG!Do6535t- z_SyJ-WlIrka?TyR*3o$}rvG@d3<52OiNV?x=R)8|3nJ&e=O8gUaMbMS;+;cS5FH9T#x z4vZFy$NR47Q?*@MTc_LLHl8b zd>c)X9SkX=M_Aj7shnRy16Mabmn(GCWJQ9bSwpqQbiRKs)CnBG5_O8m&haBims8Fx z@)Y(H+KKA5Q^}oW87!}HH0wEe9n&J2)%B0z31?-SMzy4ma%~4TAiss#?90MBModSX zwOKh6oNGha!ZSX!ET)qlKBK^j9vi`WzLjU&EBCPLQ%0bInW3!s*j@B+OFxPpl#?eZ zU1xV|Yw-Lh5oki=R!-*Qds0g*IP5Bmoh&A?lMMGWmbWdqIeF4t)P+P2Zn7x0-*o)i z26cZG*g)Ij~djNws6WCwEb96MoxT%(qh)=Hx7WaYM-tw(OoAw@G{l z_hOU*r};PspIfiO#teE2N+a}elTI`~+gQSRxNae;i&k<`sh3fak08WtvVuaHP&W9w zD4TVo9l4arfx?^=Zi&YNfr5MM@9$hVP1+0cb2^2jaN8H zvR|GcWenk-u3gPNU3i$Ak=q|luNP%ql}gzg&z7-=hkS%RJ5y<^y)1rn_b#sZRLu^5 zkb-Xyl*Y54r7{AN%h+?JrflATON`6RZEz^?3|;)8gr?bLbNAE?L8hq;?=%^Vf{YZ| zS!E-jS>qHZ5Ys{1hxOw=?sR06Q;Rv-Vk6Yy=!D!{1ds!3z@EFdj^5MuqnbOi+3~yW z;i2~>c5a&(9h$n8YdkRmW-89(W=L0{+M8MI2-mCpkyW+a2BVSOrne&WyE>ms5XeBQ zTMzQwQ%gAE;`yw9$6c7$qyXDB(vjvAH||2+KHT2njBYCBz`k)N*s#8wlOCHL4$hkNIG&Fy9~dm669W=XrQ;kJZPih4m!xE>)hT~S8j)XEHmwc z2D@i%0R?;vZ%t_A?J@M>r?cVs$Ar^#VOuWEo@a_Tx2{1}7cr7xVuE*)EIK@;S z62Q~cYhcyHB6wVB%Gy3WhgIIrW`$1{;n?$fcuZ+0zW8Yq9o?bME%8*q>lAHSv+7ms z%zke8xM?b*?6ClO@7Rs}_bIX4(pYqM##pj(_9k{>_HlktojRv?doP@J3+9Ah#4vBB 
zf5cbrI6}mRaK`gh6U}&4OG{IDtpDiQoORI&$S8~F1~;5x!rRku%O+8BTr`lbUhT;q z%NGSl)fm>!Y7n(_`$!M%GexhP6}a6O%$QZt7r18;Mv%ZC&rUi%n3FeiU>8O_q>Sm- zuJ4aa%rL91oQlgC8jyAqI?5DKlV~P0`y=KYm(Ic81I8lX5kv7cjVI`Ia1>fNP?D8e zt%N_+IcnkOhoroY(1|GrcS zg_K^#E0T_*q|=A_6Svv1j=39=|C__K&2T%=5OJ=vDU&RTL)7X+44CeoPF>el(6P&s z@hw9OPUx{2$8QdW+aXq5$s9v?DlUN+jy;Pugqg9C>%QZftHU8!O@*~wz7>D-OrfsL zHF$hZ6BXu}P`~$kxFx4ENq_SZto4L_v?WK5-FcEnJ7vY+H1F%SeK3&~U7H&_>$9n?Spi*&RcJBdWjI-SFNvyD`R1OAH1uEb zMYNr9n$~<2*7#W{0#P3^UEFM1WI&Y(- z4$Cj!WwOW}v>~d2#tk|GiUv{Wq~vu-;r7uhp8aU-^H929K%EM<%|JeF1=#nkKDys_ zn1DnW{cf#94b->e_fOuC6@g#S`9y2j+oi8|Q8ipQv4rXP{1X001?S>Ob)JTCI;c)^ z>t>~)Eq5D6Zs+&DKV+l^C0z`O(1lPlO|c9Uxl?0I(Z#ILCYMerWplpY@2@n(qO!w; zk?3-|l1fqP*ggACywCG~p7(jzzIp^gSv|X&tqwsMwQ%a#EEaKo8eiaV$@edG>jHS1FY!Z;)iUovi$Th@Y{# z59BU2vSQt9SasYE+&oQCrS0Ez*mDN%vf2r?S)DjX&y}t`C&kjyYc$3$mgyB~@&?B9 zc>}LN@;o_Cn77Xa?xeK=|T7MT5flB8%}H zfP?bqxV687$ZZ~>g`29Nv(b~4-p|LNP3a)o;mxi4riLx>vKL}HMJ(Z-0Xzn0=FvOQ5%~hc5@eXS`89eK6k*=R92{BW$9_21#70jiVS}5#urjHS zSDUHJ!zPI!k2;Aa1#?(auLE(am9l{M6(~AojFl4)$!dow2+o;;+qSDvYn4)pO>E%X z&^T^vJdB0@T1;nuc^h8_q|pH@Gp^{rON6AUX0*Bciu=!MGr{Sn9-{Ri4!^q|Dd`K= z;g>9_q%DnNy5X#ZWJZUe<8e-rI$K8x?`|3y?+*`Z8voL$nr#ingZ z!NJ?sH1VUH7L#li9&AiO2%yv2Od+v;5d`1-0{s#-nfjaOM~peXJ#hb?b=s+C!XSx{TB`aFP>ex}j|P zB%oobOY7A<^40z@e5**IbHg-Y4*ntW*1iR`xicYqfe5dhmXZ%s=FI!88Kk=g&}{j8 zZtM1)w6HvlT8ga6k|GIx8>_+uWv1q5pv{cjbXeKRhmxZ~OF3;V7nnXFrS7qr=sw>b zqq7|G`Fj;stn3)r&N3w?qxleX>IDSb)xr6_>u42!l&-IE;9^umSxc-h=4F|4O-@nV zNsk1WC{Dn;CFag*>s#URtDk6Ba6KKEk&VMMk5h?d3U=<|@NN8D{A;5P#st}O)~+Ic zjY>7`x;4V|8giZXyxxzHA5ViETqHMaij+BrFJ|r&<3LHX^9{$fc+Wu{9*-|ZgHj!8 zSmB6|PA>zaDmmTmbClfKQb;pu#IWVuOt4}SHdR_m%=Z>??Zt=K-U%-*y5uZ#jk-@h z%>EiKD4F=W?nqjBSt{ET-pxgg_%RJD18_L6^a$e`_(ZFhnk}!9&6?6+v2hQeH+t4*cJ6u~KxuFzZ>kv3 zih=iaqN9@ac%5Br~x>VK(*Q{NsMVe zUe-j|w9^meFXxf8o+Y?4(h%dj#n?aSLj$#LLHnO_F5=!5VzlK6dG$F1O0BDC=k){{ z+M0>Jw>8-$Da@fnf>qIsqlGWR8=Q`?~aJS5fnU(xP>zh`g<1aJl&5Q2r)3s*m z-)h2Eed=V(#(x0q&S_ZXJt_HJnSojFJB4K_yV3p8J9s*%z+wL^5I^-{+Z@F3a#05x 
zEs166`yLRFufNB<5D#W~<)APt!vSM*7L%fFY7J<2Le}E88LZb66@;`p2D~hdY>3Av|svLrQQYr4L`x}>KXC%a}I0Vz3H=y{LDNWi{3p1P* zkecPlvOKCGFDDy=t0<_oRG>FnaKX0lh+-a>~$mHz*ehMO8tHL!dUCz4d>!CmRFG6GHT;b=?a;TnP4nAL42uYs|gm5cD z!*tR~;pBB_y8Rn#6Nb2CFGGlsnbXtRdH8I+2@{sR#_xX*WV8G3;|uj082(!t(kP`q zS7%_wF+IL$#~>!Fi-bIF3*7vTnE!Ncf({O8(hCkf7)@;vdSnu*zbBn+{wE!7Nx|Ci u3cR$wjA+C=3R~^};3nSm(Oxqp-(DGqh8L7OY>OM($p9}Wt-)KZn(Tj?o?grV diff --git a/tensorlayer/optimizers/dragon_optimizers.py b/tensorlayer/optimizers/dragon_optimizers.py deleted file mode 100644 index 523e785..0000000 --- a/tensorlayer/optimizers/dragon_optimizers.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import absolute_import, division, print_function -import dragon as dg - -__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Admax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS'] - -# Add module aliases - - -# learning_rate=0.001, rho=0.95, epsilon=1e-07, name='Adadelta' -def Adadelta(**kwargs): - raise NotImplementedError('Adadelta optimizer function not implemented') - - -# learning_rate=0.001, initial_accumulator_value=0.1, epsilon=1e-07,name='Adagrad' -def Adagrad(**kwargs): - raise NotImplementedError('Adagrad optimizer function not implemented') - - -# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,name='Adam' -Adam = dg.optimizers.Adam - - -# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Adamax' -def Admax(**kwargs): - raise NotImplementedError('Admax optimizer function not implemented') - - -# learning_rate=0.001, learning_rate_power=-0.5, initial_accumulator_value=0.1, -# l1_regularization_strength=0.0, l2_regularization_strength=0.0, name='Ftrl',l2_shrinkage_regularization_strength=0.0 -def Ftrl(**kwargs): - raise NotImplementedError('Ftrl optimizer function not implemented') - - -# learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam', -def Nadam(**kwargs): - raise NotImplementedError('Nadam optimizer function not implemented') - - -# 
learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-07, centered=False,name='RMSprop' -RMSprop = dg.optimizers.RMSprop - -# learning_rate=0.01, momentum=0.0, nesterov=False, name='SGD' -SGD = dg.optimizers.SGD - - -# learning_rate, momentum, use_locking=False, name='Momentum', use_nesterov=False -def Momentum(**kwargs): - raise NotImplementedError('Momentum optimizer function not implemented') - - -def Lamb(**kwargs): - raise NotImplementedError('Lamb optimizer function not implemented') - - -def LARS(**kwargs): - raise NotImplementedError('LARS optimizer function not implemented') diff --git a/tensorlayer/optimizers/load_optimizers_backend.py b/tensorlayer/optimizers/load_optimizers_backend.py index 31a905a..0fc4c08 100644 --- a/tensorlayer/optimizers/load_optimizers_backend.py +++ b/tensorlayer/optimizers/load_optimizers_backend.py @@ -8,8 +8,6 @@ if BACKEND == 'tensorflow': from .tensorflow_optimizers import * elif BACKEND == 'mindspore': from .mindspore_optimizers import * -elif BACKEND == 'dragon': - from .dragon_optimizers import * elif BACKEND == 'paddle': from .paddle_optimizers import * else: diff --git a/tensorlayer/optimizers/mindspore_optimizers.py b/tensorlayer/optimizers/mindspore_optimizers.py index dd70e5f..6472d4e 100644 --- a/tensorlayer/optimizers/mindspore_optimizers.py +++ b/tensorlayer/optimizers/mindspore_optimizers.py @@ -23,7 +23,7 @@ class Adagrad(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('Adagrad optimizer function not implemented') @@ -55,7 +55,7 @@ class Adamax(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('Adamax optimizer function not implemented') @@ -64,7 +64,7 @@ class Ftrl(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('Ftrl optimizer function not implemented') @@ -73,7 +73,7 @@ class Nadam(Cell): def __init__(self): pass - def app_gradients(self): + def 
apply_gradients(self): raise Exception('Nadam optimizer function not implemented') @@ -82,7 +82,7 @@ class RMSprop(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('RMSprop optimizer function not implemented') @@ -91,7 +91,7 @@ class RMSprop(Cell): def __init__(self): pass - def app_gradients(self): + def apply_gradients(self): raise Exception('RMSprop optimizer function not implemented') diff --git a/tensorlayer/package_info.py b/tensorlayer/package_info.py index 1efbae6..9a688fe 100644 --- a/tensorlayer/package_info.py +++ b/tensorlayer/package_info.py @@ -2,22 +2,22 @@ # -*- coding: utf-8 -*- """Deep learning and Reinforcement learning library for Researchers and Engineers.""" -MAJOR = 3 +MAJOR = 1 MINOR = 0 PATCH = 0 -PRE_RELEASE = '' +PRE_RELEASE = 'alpha' # Use the following formatting: (major, minor, patch, prerelease) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) __shortversion__ = '.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]) -__package_name__ = 'tensorlayer' +__package_name__ = 'tensorlayer3' __contact_names__ = 'TensorLayer Contributors' __contact_emails__ = 'tensorlayer@gmail.com' -__homepage__ = 'http://tensorlayer.readthedocs.io/en/latest/' -__repository_url__ = 'https://github.com/tensorlayer/tensorlayer' -__download_url__ = 'https://github.com/tensorlayer/tensorlayer' +__homepage__ = 'https://tensorlayer3.readthedocs.io/en/latest/' +__repository_url__ = 'https://git.openi.org.cn/TensorLayer/tensorlayer3.0' +__download_url__ = 'https://git.openi.org.cn/TensorLayer/tensorlayer3.0' __description__ = 'High Level Tensorflow Deep Learning Library for Researcher and Engineer.' 
__license__ = 'apache' __keywords__ = 'deep learning, machine learning, computer vision, nlp, ' diff --git a/tensorlayer/vision/__init__.py b/tensorlayer/vision/__init__.py new file mode 100644 index 0000000..9f0fc8e --- /dev/null +++ b/tensorlayer/vision/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from . import transforms diff --git a/tensorlayer/vision/functional_cv2.py b/tensorlayer/vision/functional_cv2.py new file mode 100644 index 0000000..de8e18e --- /dev/null +++ b/tensorlayer/vision/functional_cv2.py @@ -0,0 +1,667 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import numpy as np +from numpy import sin, cos, tan +import math +import numbers +import importlib + + +def try_import(module_name): + """Try importing a module, with an informative error message on failure.""" + install_name = module_name + + if module_name.find('.') > -1: + install_name = module_name.split('.')[0] + + if module_name == 'cv2': + install_name = 'opencv-python' + + try: + mod = importlib.import_module(module_name) + return mod + except ImportError: + err_msg = ( + "Failed importing {}. This likely means that some paddle modules " + "require additional dependencies that have to be " + "manually installed (usually with `pip install {}`). 
" + ).format(module_name, install_name) + raise ImportError(err_msg) + + +def crop(image, offset_height, offset_width, target_height, target_width): + image_height, image_width = image.shape[0:2] + if offset_width < 0: + raise ValueError('offset_width must be >0.') + if offset_height < 0: + raise ValueError('offset_height must be >0.') + if target_height < 0: + raise ValueError('target_height must be >0.') + if target_width < 0: + raise ValueError('target_width must be >0.') + if offset_width + target_width > image_width: + raise ValueError('offset_width + target_width must be <= image width.') + if offset_height + target_height > image_height: + raise ValueError('offset_height + target_height must be <= image height.') + + return image[offset_height:offset_height + target_height, offset_width:offset_width + target_width] + + +def center_crop(image, size, central_fraction): + + image_height, image_width = image.shape[0:2] + if size is not None: + if not isinstance(size, (int, list, tuple)) or (isinstance(size, (list, tuple)) and len(size) != 2): + raise TypeError( + "Size should be a single integer or a list/tuple (h, w) of length 2.But" + "got {}.".format(size) + ) + + if isinstance(size, int): + target_height = size + target_width = size + else: + target_height = size[0] + target_width = size[1] + + elif central_fraction is not None: + if central_fraction <= 0.0 or central_fraction > 1.0: + raise ValueError('central_fraction must be within (0, 1]') + + target_height = int(central_fraction * image_height) + target_width = int(central_fraction * image_width) + + crop_top = int(round((image_height - target_height) / 2.)) + crop_left = int(round((image_width - target_width) / 2.)) + + return crop(image, crop_top, crop_left, target_height, target_width) + + +def pad(image, padding, padding_value, mode): + + if isinstance(padding, int): + top = bottom = left = right = padding + + elif isinstance(padding, (tuple, list)): + if len(padding) == 2: + left = right = 
padding[0] + top = bottom = padding[1] + elif len(padding) == 4: + left = padding[0] + top = padding[1] + right = padding[2] + bottom = padding[3] + else: + raise TypeError("The size of the padding list or tuple should be 2 or 4." "But got {}".format(padding)) + else: + raise TypeError("Padding can be any of: a number, a tuple or list of size 2 or 4." "But got {}".format(padding)) + + if mode not in ['constant', 'edge', 'reflect', 'symmetric']: + raise ValueError("Padding mode should be 'constant', 'edge', 'reflect', or 'symmetric'.") + cv2 = try_import('cv2') + _cv2_pad_from_str = { + 'constant': cv2.BORDER_CONSTANT, + 'edge': cv2.BORDER_REPLICATE, + 'reflect': cv2.BORDER_REFLECT_101, + 'symmetric': cv2.BORDER_REFLECT + } + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.copyMakeBorder( + image, top=top, bottom=bottom, left=left, right=right, borderType=_cv2_pad_from_str[mode], + value=padding_value + )[:, :, np.newaxis] + else: + return cv2.copyMakeBorder( + image, top=top, bottom=bottom, left=left, right=right, borderType=_cv2_pad_from_str[mode], + value=padding_value + ) + + +def resize(image, size, method): + + if not (isinstance(size, int) or (isinstance(size, (list, tuple)) and len(size) == 2)): + raise TypeError('Size should be a single number or a list/tuple (h, w) of length 2.' 'Got {}.'.format(size)) + if method not in ('nearest', 'bilinear', 'area', 'bicubic' 'lanczos'): + raise ValueError( + "Unknown resize method! 
resize method must be in " + "(\'nearest\',\'bilinear\',\'bicubic\',\'area\',\'lanczos\')" + ) + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + + h, w = image.shape[:2] + + if isinstance(size, int): + if (w <= h and w == size) or (h <= w and h == size): + return image + if w < h: + target_w = size + target_h = int(size * h / w) + else: + target_h = size + target_w = int(size * w / h) + size = (target_h, target_w) + output = cv2.resize(image, dsize=(size[1], size[0]), interpolation=_cv2_interp_from_str[method]) + if len(image.shape) == 3 and image.shape[2] == 1: + return output[:, :, np.newaxis] + else: + return output + + +def transpose(image, order): + + if not (isinstance(order, (list, tuple)) and len(order) == 3): + raise TypeError("Order must be a list/tuple of length 3." "But got {}.".format(order)) + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + return image.transpose(order) + + +def hwc_to_chw(image): + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + return image.transpose((2, 0, 1)) + + +def chw_to_hwc(image): + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + return image.transpose((1, 2, 0)) + + +def rgb_to_hsv(image): + + cv2 = try_import('cv2') + image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) + return image + + +def hsv_to_rgb(image): + + cv2 = try_import('cv2') + image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB) + return image + + +def rgb_to_gray(image, num_output_channels): + + cv2 = try_import('cv2') + + if num_output_channels == 1: + image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis] + elif num_output_channels == 3: + image = np.broadcast_to(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis], image.shape) + else: + raise 
ValueError('num_output_channels should be either 1 or 3') + + return image + + +def adjust_brightness(image, brightness_factor): + if brightness_factor < 0: + raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor)) + cv2 = try_import('cv2') + + table = np.array([i * brightness_factor for i in range(0, 256)]).clip(0, 255).astype('uint8') + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.LUT(image, table)[:, :, np.newaxis] + else: + return cv2.LUT(image, table) + + +def adjust_contrast(image, contrast_factor): + """Adjusts contrast of an image. + + Args: + img (np.array): Image to be adjusted. + contrast_factor (float): How much to adjust the contrast. Can be any + non negative number. 0 gives a solid gray image, 1 gives the + original image while 2 increases the contrast by a factor of 2. + + Returns: + np.array: Contrast adjusted image. + + """ + if contrast_factor < 0: + raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor)) + cv2 = try_import('cv2') + + table = np.array([(i - 127) * contrast_factor + 127 for i in range(0, 256)]).clip(0, 255).astype('uint8') + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.LUT(image, table)[:, :, np.newaxis] + else: + return cv2.LUT(image, table) + + +def adjust_hue(image, hue_factor): + """Adjusts hue of an image. + + The image hue is adjusted by converting the image to HSV and + cyclically shifting the intensities in the hue channel (H). + The image is then converted back to original image mode. + + `hue_factor` is the amount of shift in H channel and must be in the + interval `[-0.5, 0.5]`. + + Args: + image (PIL.Image): PIL Image to be adjusted. + hue_factor (float): How much to shift the hue channel. Should be in + [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in + HSV space in positive and negative direction respectively. + 0 means no shift. 
Therefore, both -0.5 and 0.5 will give an image + with complementary colors while 0 gives the original image. + + Returns: + PIL.Image: Hue adjusted image. + + """ + if not (-0.5 <= hue_factor <= 0.5): + raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor)) + cv2 = try_import('cv2') + + dtype = image.dtype + image = image.astype(np.uint8) + hsv_img = cv2.cvtColor(image, cv2.COLOR_RGB2HSV_FULL) + h, s, v = cv2.split(hsv_img) + + alpha = np.random.uniform(hue_factor, hue_factor) + h = h.astype(np.uint8) + # uint8 addition take cares of rotation across boundaries + with np.errstate(over="ignore"): + h += np.uint8(alpha * 255) + hsv_img = cv2.merge([h, s, v]) + return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB_FULL).astype(dtype) + + +def adjust_saturation(image, saturation_factor): + """Adjusts color saturation of an image. + + Args: + image (np.array): Image to be adjusted. + saturation_factor (float): How much to adjust the saturation. 0 will + give a black and white image, 1 will give the original image while + 2 will enhance the saturation by a factor of 2. + + Returns: + np.array: Saturation adjusted image. + + """ + if saturation_factor < 0: + raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor)) + cv2 = try_import('cv2') + + dtype = image.dtype + image = image.astype(np.float32) + alpha = np.random.uniform(saturation_factor, saturation_factor) + gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) + gray_img = gray_img[..., np.newaxis] + img = image * alpha + gray_img * (1 - alpha) + return img.clip(0, 255).astype(dtype) + + +def hflip(image): + """Horizontally flips the given image. + + Args: + image (np.array): Image to be flipped. + + Returns: + np.array: Horizontall flipped image. + + """ + cv2 = try_import('cv2') + + return cv2.flip(image, 1) + + +def vflip(image): + """Vertically flips the given np.array. + + Args: + image (np.array): Image to be flipped. 
+ + Returns: + np.array: Vertically flipped image. + + """ + cv2 = try_import('cv2') + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.flip(image, 0)[:, :, np.newaxis] + else: + return cv2.flip(image, 0) + + +def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value): + ''' + + Parameters + ---------- + image: + A np.array image to be padded size of (target_width, target_height) + offset_height: + Number of rows of padding_values to add on top. + offset_width: + Number of columns of padding_values to add on the left. + target_height: + Height of output image. + target_width: + Width of output image. + padding_value: + value to pad + + Returns: + np.array image: padded image + ------- + + ''' + if offset_height < 0: + raise ValueError('offset_height must be >= 0') + if offset_width < 0: + raise ValueError('offset_width must be >= 0') + + height, width = image.shape[:2] + after_padding_width = target_width - offset_width - width + after_padding_height = target_height - offset_height - height + if after_padding_height < 0: + raise ValueError('image height must be <= target - offset') + if after_padding_width < 0: + raise ValueError('image width must be <= target - offset') + + return pad( + image, padding=(offset_width, offset_height, after_padding_width, after_padding_height), + padding_value=padding_value, mode='constant' + ) + + +def rotate(img, angle, interpolation, expand, center, fill): + """Rotates the image by angle. + + Args: + img (np.array): Image to be rotated. + angle (float or int): In degrees degrees counter clockwise order. + interpolation (int|str, optional): Interpolation method. If omitted, or if the + image has only one channel, it is set to cv2.INTER_NEAREST. + when use cv2 backend, support method are as following: + - "nearest": cv2.INTER_NEAREST, + - "bilinear": cv2.INTER_LINEAR, + - "bicubic": cv2.INTER_CUBIC + expand (bool, optional): Optional expansion flag. 
+ If true, expands the output image to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (2-tuple, optional): Optional center of rotation. + Origin is the upper left corner. + Default is the center of the image. + fill (3-tuple or int): RGB pixel fill value for area outside the rotated image. + If int, it is used for all channels respectively. + + Returns: + np.array: Rotated image. + + """ + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + h, w, c = img.shape + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' 
+ 'But got {}'.format(fill) + ) + + if center is None: + center = (w / 2.0, h / 2.0) + M = cv2.getRotationMatrix2D(center, angle, 1) + + if expand: + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + # calculate output size + xx = [] + yy = [] + + angle = -math.radians(angle) + expand_matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + post_trans = (0, 0) + expand_matrix[2], expand_matrix[5] = transform( + -center[0] - post_trans[0], -center[1] - post_trans[1], expand_matrix + ) + expand_matrix[2] += center[0] + expand_matrix[5] += center[1] + + for x, y in ((0, 0), (w, 0), (w, h), (0, h)): + x, y = transform(x, y, expand_matrix) + xx.append(x) + yy.append(y) + nw = math.ceil(max(xx)) - math.floor(min(xx)) + nh = math.ceil(max(yy)) - math.floor(min(yy)) + + M[0, 2] += (nw - w) * 0.5 + M[1, 2] += (nh - h) * 0.5 + + w, h = int(nw), int(nh) + + if len(img.shape) == 3 and img.shape[2] == 1: + return cv2.warpAffine(img, M, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill)[:, :, + np.newaxis] + else: + return cv2.warpAffine(img, M, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill) + + +def get_affine_matrix(center, angle, translate, scale, shear): + + rot = math.radians(angle) + sx, sy = [math.radians(s) for s in shear] + + cx, cy = center + tx, ty = translate + + # RSS without scaling + a = math.cos(rot - sy) / math.cos(sy) + b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot) + c = math.sin(rot - sy) / math.cos(sy) + d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot) + + # Inverted rotation matrix with scale and shear + # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1 + matrix = [d, -b, 0.0, -c, a, 0.0] + matrix = [x / scale for x in matrix] + + # Apply inverse of translation and of center translation: RSS^-1 * 
C^-1 * T^-1 + matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty) + matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty) + + # Apply center translation: C * RSS^-1 * C^-1 * T^-1 + matrix[2] += cx + matrix[5] += cy + + return matrix + + +def random_shear(image, degrees, interpolation, fill): + + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + + h, w, c = image.shape + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + + center = (w / 2.0, h / 2.0) + shear = [-np.random.uniform(degrees[0], degrees[1]), -np.random.uniform(degrees[2], degrees[3])] + + matrix = get_affine_matrix(center=center, angle=0, translate=(0, 0), scale=1.0, shear=shear) + matrix = np.asarray(matrix).reshape((2, 3)) + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], + borderValue=fill)[:, :, np.newaxis] + else: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill) + + +def random_shift(image, shift, interpolation, fill): + + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + + h, w, c = image.shape + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' 
+ 'But got {}'.format(fill) + ) + hrg = shift[0] + wrg = shift[1] + tx = -np.random.uniform(-hrg, hrg) * w + ty = -np.random.uniform(-wrg, wrg) * h + center = (w / 2.0, h / 2.0) + + matrix = get_affine_matrix(center=center, angle=0, translate=(tx, ty), scale=1.0, shear=(0, 0)) + matrix = np.asarray(matrix).reshape((2, 3)) + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], + borderValue=fill)[:, :, np.newaxis] + else: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill) + + +def random_zoom(image, zoom, interpolation, fill): + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + + h, w, c = image.shape + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' 
+ 'But got {}'.format(fill) + ) + + scale = 1 / np.random.uniform(zoom[0], zoom[1]) + center = (w / 2.0, h / 2.0) + + matrix = get_affine_matrix(center=center, angle=0, translate=(0, 0), scale=scale, shear=(0, 0)) + matrix = np.asarray(matrix).reshape((2, 3)) + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], + borderValue=fill)[:, :, np.newaxis] + else: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill) + + +def random_affine(image, degrees, shift, zoom, shear, interpolation, fill): + cv2 = try_import('cv2') + _cv2_interp_from_str = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'area': cv2.INTER_AREA, + 'bicubic': cv2.INTER_CUBIC, + 'lanczos': cv2.INTER_LANCZOS4 + } + h, w, c = image.shape + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' 
+ 'But got {}'.format(fill) + ) + center = (w / 2.0, h / 2.0) + + angle = -float(np.random.uniform(degrees[0], degrees[1])) + + if shift is not None: + max_dx = float(shift[0] * h) + max_dy = float(shift[1] * w) + tx = -int(round(np.random.uniform(-max_dx, max_dx))) + ty = -int(round(np.random.uniform(-max_dy, max_dy))) + shift = [tx, ty] + else: + shift = [0, 0] + + if zoom is not None: + scale = 1 / np.random.uniform(zoom[0], zoom[1]) + else: + scale = 1.0 + + shear_x = shear_y = 0.0 + if shear is not None: + shear_x = float(np.random.uniform(shear[0], shear[1])) + if len(shear) == 4: + shear_y = float(np.random.uniform(shear[2], shear[3])) + shear = (-shear_x, -shear_y) + + matrix = get_affine_matrix(center=center, angle=angle, translate=shift, scale=scale, shear=shear) + matrix = np.asarray(matrix).reshape((2, 3)) + + if len(image.shape) == 3 and image.shape[2] == 1: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], + borderValue=fill)[:, :, np.newaxis] + else: + return cv2.warpAffine(image, matrix, (w, h), flags=_cv2_interp_from_str[interpolation], borderValue=fill) diff --git a/tensorlayer/vision/functional_pil.py b/tensorlayer/vision/functional_pil.py new file mode 100644 index 0000000..124b870 --- /dev/null +++ b/tensorlayer/vision/functional_pil.py @@ -0,0 +1,554 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import PIL +from PIL import Image, ImageOps, ImageEnhance +import numpy as np +import colorsys +import random +import math +from numpy import sin, cos, tan +import numbers + +_pil_interp_from_str = { + 'nearest': Image.NEAREST, + 'bilinear': Image.BILINEAR, + 'bicubic': Image.BICUBIC, + 'box': Image.BOX, + 'lanczos': Image.LANCZOS, + 'hamming': Image.HAMMING +} + + +def crop(image, offset_height, offset_width, target_height, target_width): + image_width, image_height = image.size + if offset_width < 0: + raise ValueError('offset_width must be >0.') + if offset_height < 0: + raise ValueError('offset_height 
must be >0.') + if target_height < 0: + raise ValueError('target_height must be >0.') + if target_width < 0: + raise ValueError('target_width must be >0.') + if offset_width + target_width > image_width: + raise ValueError('offset_width + target_width must be <= image width.') + if offset_height + target_height > image_height: + raise ValueError('offset_height + target_height must be <= image height.') + + return image.crop((offset_width, offset_height, offset_width + target_width, offset_height + target_height)) + + +def center_crop(image, size, central_fraction): + + image_width, image_height = image.size + if size is not None: + if not isinstance(size, (int, list, tuple)) or (isinstance(size, (list, tuple)) and len(size) != 2): + raise TypeError( + "Size should be a single integer or a list/tuple (h, w) of length 2.But" + "got {}.".format(size) + ) + + if isinstance(size, int): + target_height = size + target_width = size + else: + target_height = size[0] + target_width = size[1] + + elif central_fraction is not None: + if central_fraction <= 0.0 or central_fraction > 1.0: + raise ValueError('central_fraction must be within (0, 1]') + + target_height = int(central_fraction * image_height) + target_width = int(central_fraction * image_width) + + crop_top = int(round((image_height - target_height) / 2.)) + crop_left = int(round((image_width - target_width) / 2.)) + + return crop(image, crop_top, crop_left, target_height, target_width) + + +def pad(image, padding, padding_value, mode): + + if isinstance(padding, int): + top = bottom = left = right = padding + + elif isinstance(padding, (tuple, list)): + if len(padding) == 2: + left = right = padding[0] + top = bottom = padding[1] + elif len(padding) == 4: + left = padding[0] + top = padding[1] + right = padding[2] + bottom = padding[3] + else: + raise TypeError("The size of the padding list or tuple should be 2 or 4." 
"But got {}".format(padding)) + else: + raise TypeError("Padding can be any of: a number, a tuple or list of size 2 or 4." "But got {}".format(padding)) + + if mode not in ['constant', 'edge', 'reflect', 'symmetric']: + raise TypeError("Padding mode should be 'constant', 'edge', 'reflect', or 'symmetric'.") + + if mode == 'constant': + if image.mode == 'P': + palette = image.getpalette() + image = ImageOps.expand(image, border=padding, fill=padding_value) + image.putpalette(palette) + return image + return ImageOps.expand(image, border=padding, fill=padding_value) + + if image.mode == 'P': + palette = image.getpalette() + image = np.asarray(image) + image = np.pad(image, ((top, bottom), (left, right)), mode) + image = Image.fromarray(image) + image.putpalette(palette) + return image + + image = np.asarray(image) + # RGB image + if len(image.shape) == 3: + image = np.pad(image, ((top, bottom), (left, right), (0, 0)), mode) + # Grayscale image + if len(image.shape) == 2: + image = np.pad(image, ((top, bottom), (left, right)), mode) + + return Image.fromarray(image) + + +def resize(image, size, method): + + if not (isinstance(size, int) or (isinstance(size, (list, tuple)) and len(size) == 2)): + raise TypeError('Size should be a single number or a list/tuple (h, w) of length 2.' 'Got {}.'.format(size)) + + if method not in ('nearest', 'bilinear', 'bicubic', 'box', 'lanczos', 'hamming'): + raise ValueError( + "Unknown resize method! 
resize method must be in " + "(\'nearest\',\'bilinear\',\'bicubic\',\'box\',\'lanczos\',\'hamming\')" + ) + if isinstance(size, int): + w, h = image.size + if (w <= h and w == size) or (h <= w and h == size): + return image + if w < h: + ow = size + oh = int(size * h / w) + return image.resize((ow, oh), _pil_interp_from_str[method]) + else: + oh = size + ow = int(size * w / h) + return image.resize((ow, oh), _pil_interp_from_str[method]) + else: + return image.resize(size[::-1], _pil_interp_from_str[method]) + + +def transpose(image, order): + + image = np.asarray(image) + if not (isinstance(order, (list, tuple)) and len(order) == 3): + raise TypeError("Order must be a list/tuple of length 3." "But got {}.".format(order)) + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + image = image.transpose(order) + image = Image.fromarray(image) + return image + + +def hwc_to_chw(image): + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + image = image.transpose((2, 0, 1)) + image = Image.fromarray(image) + return image + + +def chw_to_hwc(image): + + image_shape = image.shape + if len(image_shape) == 2: + image = image[..., np.newaxis] + + image = image.transpose((1, 2, 0)) + image = Image.fromarray(image) + return image + + +def rgb_to_hsv(image): + + return image.convert('HSV') + + +def hsv_to_rgb(image): + + return image.convert('RGB') + + +def rgb_to_gray(image, num_output_channels): + + if num_output_channels == 1: + img = image.convert('L') + elif num_output_channels == 3: + img = image.convert('L') + np_img = np.array(img, dtype=np.uint8) + np_img = np.dstack([np_img, np_img, np_img]) + img = Image.fromarray(np_img, 'RGB') + else: + raise ValueError('num_output_channels should be either 1 or 3') + + return img + + +def adjust_brightness(image, brightness_factor): + """Adjusts brightness of an Image. + + Args: + image (PIL.Image): PIL Image to be adjusted. 
+ brightness_factor (float): How much to adjust the brightness. Can be + any non negative number. 0 gives a black image, 1 gives the + original image while 2 increases the brightness by a factor of 2. + + Returns: + PIL.Image: Brightness adjusted image. + + """ + if brightness_factor < 0: + raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor)) + + enhancer = ImageEnhance.Brightness(image) + image = enhancer.enhance(brightness_factor) + return image + + +def adjust_contrast(image, contrast_factor): + """Adjusts contrast of an Image. + + Args: + image (PIL.Image): PIL Image to be adjusted. + contrast_factor (float): How much to adjust the contrast. Can be any + non negative number. 0 gives a solid gray image, 1 gives the + original image while 2 increases the contrast by a factor of 2. + + Returns: + PIL.Image: Contrast adjusted image. + + """ + if contrast_factor < 0: + raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor)) + + enhancer = ImageEnhance.Contrast(image) + image = enhancer.enhance(contrast_factor) + return image + + +def adjust_hue(image, hue_factor): + """Adjusts hue of an image. + + The image hue is adjusted by converting the image to HSV and + cyclically shifting the intensities in the hue channel (H). + The image is then converted back to original image mode. + + `hue_factor` is the amount of shift in H channel and must be in the + interval `[-0.5, 0.5]`. + + Args: + image (PIL.Image): PIL Image to be adjusted. + hue_factor (float): How much to shift the hue channel. Should be in + [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in + HSV space in positive and negative direction respectively. + 0 means no shift. Therefore, both -0.5 and 0.5 will give an image + with complementary colors while 0 gives the original image. + + Returns: + PIL.Image: Hue adjusted image. 
+ + """ + if not (-0.5 <= hue_factor <= 0.5): + raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor)) + + input_mode = image.mode + if input_mode in {'L', '1', 'I', 'F'}: + return image + h, s, v = image.convert('HSV').split() + + np_h = np.array(h, dtype=np.uint8) + # uint8 addition take cares of rotation across boundaries + with np.errstate(over='ignore'): + np_h += np.uint8(hue_factor * 255) + h = Image.fromarray(np_h, 'L') + + image = Image.merge('HSV', (h, s, v)).convert(input_mode) + return image + + +def adjust_saturation(image, saturation_factor): + """Adjusts color saturation of an image. + + Args: + image (PIL.Image): PIL Image to be adjusted. + saturation_factor (float): How much to adjust the saturation. 0 will + give a black and white image, 1 will give the original image while + 2 will enhance the saturation by a factor of 2. + + Returns: + PIL.Image: Saturation adjusted image. + + """ + if saturation_factor < 0: + raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor)) + enhancer = ImageEnhance.Color(image) + image = enhancer.enhance(saturation_factor) + return image + + +def hflip(image): + """Horizontally flips the given PIL Image. + + Args: + img (PIL.Image): Image to be flipped. + + Returns: + PIL.Image: Horizontall flipped image. + + """ + + return image.transpose(Image.FLIP_LEFT_RIGHT) + + +def vflip(image): + """Vertically flips the given PIL Image. + + Args: + img (PIL.Image): Image to be flipped. + + Returns: + PIL.Image: Vertically flipped image. + + """ + + return image.transpose(Image.FLIP_TOP_BOTTOM) + + +def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value): + ''' + + Parameters + ---------- + image: + A PIL image to be padded size of (target_width, target_height) + offset_height: + Number of rows of padding_values to add on top. + offset_width: + Number of columns of padding_values to add on the left. 
+ target_height: + Height of output image. + target_width: + Width of output image. + padding_value: + value to pad + + Returns: + PIL.Image: padded image + ------- + + ''' + if offset_height < 0: + raise ValueError('offset_height must be >= 0') + if offset_width < 0: + raise ValueError('offset_width must be >= 0') + + width, height = image.size + after_padding_width = target_width - offset_width - width + after_padding_height = target_height - offset_height - height + if after_padding_height < 0: + raise ValueError('image height must be <= target - offset') + if after_padding_width < 0: + raise ValueError('image width must be <= target - offset') + + return pad( + image, padding=(offset_width, offset_height, after_padding_width, after_padding_height), + padding_value=padding_value, mode='constant' + ) + + +def rotate(image, angle, interpolation, expand, center, fill): + """Rotates the image by angle. + + Args: + img (PIL.Image): Image to be rotated. + angle (float or int): In degrees degrees counter clockwise order. + interpolation (str, optional): Interpolation method. If omitted, or if the + image has only one channel, it is set to PIL.Image.NEAREST . when use pil backend, + support method are as following: + - "nearest": Image.NEAREST, + - "bilinear": Image.BILINEAR, + - "bicubic": Image.BICUBIC + expand (bool, optional): Optional expansion flag. + If true, expands the output image to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (2-tuple, optional): Optional center of rotation. + Origin is the upper left corner. + Default is the center of the image. + fill (3-tuple or int): RGB pixel fill value for area outside the rotated image. + If int, it is used for all channels respectively. + + Returns: + PIL.Image: Rotated image. 
+ + """ + c = 1 if image.mode == 'L' else 3 + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + + return image.rotate(angle, _pil_interp_from_str[interpolation], expand, center, fillcolor=fill) + + +def get_affine_matrix(center, angle, translate, scale, shear): + + rot = math.radians(angle) + sx, sy = [math.radians(s) for s in shear] + + cx, cy = center + tx, ty = translate + + # RSS without scaling + a = math.cos(rot - sy) / math.cos(sy) + b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot) + c = math.sin(rot - sy) / math.cos(sy) + d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot) + + # Inverted rotation matrix with scale and shear + # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1 + matrix = [d, -b, 0.0, -c, a, 0.0] + matrix = [x / scale for x in matrix] + + # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1 + matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty) + matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty) + + # Apply center translation: C * RSS^-1 * C^-1 * T^-1 + matrix[2] += cx + matrix[5] += cy + + return matrix + + +def random_shear(image, degrees, interpolation, fill): + + c = 1 if image.mode == 'L' else 3 + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' 
+ 'But got {}'.format(fill) + ) + + w, h = image.size + center = (w / 2.0, h / 2.0) + shear = [np.random.uniform(degrees[0], degrees[1]), np.random.uniform(degrees[2], degrees[3])] + + interpolation = _pil_interp_from_str[interpolation] + matrix = get_affine_matrix(center=center, angle=0, translate=(0, 0), scale=1.0, shear=shear) + output_size = (w, h) + kwargs = {"fillcolor": fill} + return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs) + + +def random_shift(image, shift, interpolation, fill): + + c = 1 if image.mode == 'L' else 3 + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + + w, h = image.size + center = (w / 2.0, h / 2.0) + hrg = shift[0] + wrg = shift[1] + tx = np.random.uniform(-hrg, hrg) * h + ty = np.random.uniform(-wrg, wrg) * w + matrix = get_affine_matrix(center=center, angle=0, translate=(tx, ty), scale=1.0, shear=(0, 0)) + print(matrix) + + output_size = (w, h) + kwargs = {"fillcolor": fill} + return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs) + + +def random_zoom(image, zoom, interpolation, fill): + + c = 1 if image.mode == 'L' else 3 + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' 
+ 'But got {}'.format(fill) + ) + w, h = image.size + scale = np.random.uniform(zoom[0], zoom[1]) + center = (w / 2.0, h / 2.0) + + matrix = get_affine_matrix(center=center, angle=0, translate=(0, 0), scale=scale, shear=(0, 0)) + + output_size = (w, h) + kwargs = {"fillcolor": fill} + return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs) + + +def random_affine(image, degrees, shift, zoom, shear, interpolation, fill): + + c = 1 if image.mode == 'L' else 3 + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + + w, h = image.size + angle = float(np.random.uniform(float(degrees[0]), float(degrees[1]))) + center = (w / 2.0, h / 2.0) + if shift is not None: + max_dx = float(shift[0] * w) + max_dy = float(shift[1] * h) + tx = int(round(np.random.uniform(-max_dx, max_dx))) + ty = int(round(np.random.uniform(-max_dy, max_dy))) + translations = (tx, ty) + else: + translations = (0, 0) + + if zoom is not None: + scale = float(np.random.uniform(zoom[0], zoom[1])) + else: + scale = 1.0 + + shear_x = shear_y = 0 + if shear is not None: + shear_x = float(np.random.uniform(shear[0], shear[1])) + if len(shear) == 4: + shear_y = float(np.random.uniform(shear[2], shear[3])) + shear = (shear_x, shear_y) + matrix = get_affine_matrix(center=center, angle=angle, translate=translations, scale=scale, shear=shear) + + output_size = (w, h) + kwargs = {"fillcolor": fill} + return image.transform(output_size, Image.AFFINE, matrix, interpolation, **kwargs) diff --git a/tensorlayer/dataflow/image/__init__.py b/tensorlayer/vision/load_vision_backend.py similarity index 66% rename from tensorlayer/dataflow/image/__init__.py rename to tensorlayer/vision/load_vision_backend.py index c0568ed..c816d3d 100644 --- a/tensorlayer/dataflow/image/__init__.py +++ 
b/tensorlayer/vision/load_vision_backend.py @@ -1,17 +1,16 @@ -#! /usr/bin/python +#!/usr/bin/env python # -*- coding: utf-8 -*- -from __future__ import absolute_import, division, print_function +from __future__ import absolute_import, division, print_function from tensorlayer.backend.ops.load_backend import BACKEND if BACKEND == 'tensorflow': - from .tensorflow_image import * + from .tensorflow_vision import * elif BACKEND == 'mindspore': - from .mindspore_image import * -elif BACKEND == 'paddle': - from .paddle_image import * -elif BACKEND == 'pytorch': + from .mindspore_vision import * +elif BACKEND == 'dragon': pass - +elif BACKEND == 'paddle': + from .paddle_vision import * else: raise NotImplementedError("This backend is not supported") diff --git a/tensorlayer/vision/mindspore_vision.py b/tensorlayer/vision/mindspore_vision.py new file mode 100644 index 0000000..b9d70a2 --- /dev/null +++ b/tensorlayer/vision/mindspore_vision.py @@ -0,0 +1,610 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import mindspore as ms +from . import functional_cv2 as F_cv2 +from . 
import functional_pil as F_pil +import mindspore.ops as P +from mindspore.numpy import std +from PIL import Image +import PIL +import numpy as np +import numbers +import random +import math + +__all__ = [ + 'central_crop', + 'to_tensor', + 'crop', + 'pad', + 'resize', + 'transpose', + 'hwc_to_chw', + 'chw_to_hwc', + 'rgb_to_hsv', + 'hsv_to_rgb', + 'rgb_to_gray', + 'adjust_brightness', + 'adjust_contrast', + 'adjust_hue', + 'adjust_saturation', + 'normalize', + 'hflip', + 'vflip', + 'padtoboundingbox', + 'standardize', + 'random_brightness', + 'random_contrast', + 'random_saturation', + 'random_hue', + 'random_crop', + 'random_resized_crop', + 'random_vflip', + 'random_hflip', + 'random_rotation', + 'random_shear', + 'random_shift', + 'random_zoom', + 'random_affine', +] + + +def _is_pil_image(image): + return isinstance(image, Image.Image) + + +def _is_tensor_image(image): + return isinstance(image, ms.Tensor) + + +def _is_numpy_image(image): + return isinstance(image, np.ndarray) and (image.ndim in {2, 3}) + + +def _get_image_size(img): + if _is_pil_image(img): + return img.size[::-1] + elif _is_numpy_image(img): + return img.shape[:2] + else: + raise TypeError("Unexpected type {}".format(type(img))) + + +def random_factor(factor, name, center=1, bound=(0, float('inf')), non_negative=True): + if isinstance(factor, numbers.Number): + if factor < 0: + raise ValueError('The input value of {} cannot be negative.'.format(name)) + factor = [center - factor, center + factor] + if non_negative: + factor[0] = max(0, factor[0]) + elif isinstance(factor, (tuple, list)) and len(factor) == 2: + if not bound[0] <= factor[0] <= factor[1] <= bound[1]: + raise ValueError( + "Please check your value range of {} is valid and " + "within the bound {}.".format(name, bound) + ) + else: + raise TypeError("Input of {} should be either a single value, or a list/tuple of " "length 2.".format(name)) + factor = np.random.uniform(factor[0], factor[1]) + return factor + + +def to_tensor(image, 
data_format='HWC'): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray. Got {}'.format(type(image))) + + image = np.asarray(image).astype('float32') + + if image.ndim == 2: + image = image[:, :, None] + + if data_format == 'CHW': + + image = np.transpose(image, (2, 0, 1)) + image = image / 255. + else: + image = image / 255. + + return image + + +def central_crop(image, size=None, central_fraction=None): + + if size is None and central_fraction is None: + raise ValueError('central_fraction and size can not be both None') + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + + return F_pil.center_crop(image, size, central_fraction) + + else: + + return F_cv2.center_crop(image, size, central_fraction) + + +def crop(image, offset_height, offset_width, target_height, target_width): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + + return F_pil.crop(image, offset_height, offset_width, target_height, target_width) + + else: + + return F_cv2.crop(image, offset_height, offset_width, target_height, target_width) + + +def pad(image, padding, padding_value, mode): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.pad(image, padding, padding_value, mode) + else: + return F_cv2.pad(image, padding, padding_value, mode) + + +def resize(image, size, method): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.resize(image, size, method) + else: + return F_cv2.resize(image, size, method) + + +def transpose(image, order): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.transpose(image, order) + else: + return F_cv2.transpose(image, order) + + +def hwc_to_chw(image): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hwc_to_chw(image) + else: + return F_cv2.hwc_to_chw(image) + + +def chw_to_hwc(image): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.chw_to_hwc(image) + else: + return F_cv2.chw_to_hwc(image) + + +def rgb_to_hsv(image): + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.rgb_to_hsv(image) + else: + return F_cv2.rgb_to_hsv(image) + + +def hsv_to_rgb(image): + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hsv_to_rgb(image) + else: + return F_cv2.hsv_to_rgb(image) + + +def rgb_to_gray(image, num_output_channels): + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.rgb_to_gray(image, num_output_channels) + else: + return F_cv2.rgb_to_gray(image, num_output_channels) + + +def adjust_brightness(image, brightness_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_brightness(image, brightness_factor) + else: + return F_cv2.adjust_brightness(image, brightness_factor) + + +def adjust_contrast(image, contrast_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_contrast(image, contrast_factor) + else: + return F_cv2.adjust_contrast(image, contrast_factor) + + +def adjust_hue(image, hue_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_hue(image, hue_factor) + else: + return F_cv2.adjust_hue(image, hue_factor) + + +def adjust_saturation(image, saturation_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_saturation(image, saturation_factor) + else: + return F_cv2.adjust_saturation(image, saturation_factor) + + +def hflip(image): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hflip(image) + else: + return F_cv2.hflip(image) + + +def vflip(image): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.vflip(image) + else: + return F_cv2.vflip(image) + + +def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value) + else: + return F_cv2.padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value) + + +def normalize(image, mean, std, data_format): + + if _is_pil_image(image): + image = np.asarray(image) + + image = image.astype('float32') + + if data_format == 'CHW': + num_channels = image.shape[0] + elif data_format == 'HWC': + num_channels = image.shape[2] + + if isinstance(mean, numbers.Number): + mean = (mean, ) * num_channels + elif isinstance(mean, (list, tuple)): + if len(mean) != num_channels: + raise ValueError("Length of mean must be 1 or equal to the number of channels({0}).".format(num_channels)) + if isinstance(std, numbers.Number): + std = (std, ) * num_channels + elif isinstance(std, (list, tuple)): + if len(std) != num_channels: + raise ValueError("Length of std must be 1 or equal to the number of channels({0}).".format(num_channels)) + mean = np.array(mean, dtype=image.dtype) + std = np.array(std, dtype=image.dtype) + + if data_format == 'CHW': + image = (image - mean[None, None, :]) / std[None, None, :] + elif data_format == 'HWC': + image = (image - mean[None, None, :]) / std[None, None, :] + + return image + + +def 
standardize(image): + ''' + Reference to tf.image.per_image_standardization(). + Linearly scales each image in image to have mean 0 and variance 1. + ''' + + if _is_pil_image(image): + image = np.asarray(image) + + image = image.astype('float32') + + num_pixels = image.size + image_mean = np.mean(image, keep_dims=False) + stddev = np.std(image, keep_dims=False) + min_stddev = 1.0 / np.sqrt(num_pixels) + adjusted_stddev = np.maximum(stddev, min_stddev) + + return (image - image_mean) / adjusted_stddev + + +def random_brightness(image, brightness_factor): + ''' + Perform a random brightness on the input image. + Parameters + ---------- + image: + Input images to adjust random brightness + brightness_factor: + Brightness adjustment factor (default=(1, 1)). Cannot be negative. + If it is a float, the factor is uniformly chosen from the range [max(0, 1-brightness), 1+brightness]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. + ------- + + ''' + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + brightness_factor = random_factor(brightness_factor, name='brightness') + + if _is_pil_image(image): + return F_pil.adjust_brightness(image, brightness_factor) + else: + return F_cv2.adjust_brightness(image, brightness_factor) + + +def random_contrast(image, contrast_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + contrast_factor = random_factor(contrast_factor, name='contrast') + + if _is_pil_image(image): + return F_pil.adjust_contrast(image, contrast_factor) + else: + return F_cv2.adjust_contrast(image, contrast_factor) + + +def random_saturation(image, saturation_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + saturation_factor = random_factor(saturation_factor, name='saturation') + + if _is_pil_image(image): + return F_pil.adjust_saturation(image, saturation_factor) + else: + return F_cv2.adjust_saturation(image, saturation_factor) + + +def random_hue(image, hue_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + hue_factor = random_factor(hue_factor, name='hue', center=0, bound=(-0.5, 0.5), non_negative=False) + + if _is_pil_image(image): + return F_pil.adjust_hue(image, hue_factor) + else: + return F_cv2.adjust_hue(image, hue_factor) + + +def random_crop(image, size, padding, pad_if_needed, fill, padding_mode): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(size, int): + size = (size, size) + elif isinstance(size, (tuple, list)) and len(size) == 2: + size = size + else: + raise ValueError('Size should be a int or a list/tuple with length of 2. 
' 'But got {}'.format(size)) + + height, width = _get_image_size(image) + if padding is not None: + image = pad(image, padding, fill, padding_mode) + + if pad_if_needed and height < size[0]: + image = pad(image, (0, height - size[0]), fill, padding_mode) + + if pad_if_needed and width < size[1]: + image = pad(image, (width - size[1], 0), fill, padding_mode) + + height, width = _get_image_size(image) + target_height, target_width = size + + if height < target_height or width < target_width: + raise ValueError( + 'Crop size {} should be smaller than input image size {}. '.format( + (target_height, target_width), (height, width) + ) + ) + + offset_height = random.randint(0, height - target_height) + offset_width = random.randint(0, width - target_width) + + return crop(image, offset_height, offset_width, target_height, target_width) + + +def random_resized_crop(image, size, scale, ratio, interpolation): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(size, int): + size = (size, size) + elif isinstance(size, (list, tuple)) and len(size) == 2: + size = size + else: + raise TypeError('Size should be a int or a list/tuple with length of 2.' 'But got {}.'.format(size)) + if not (isinstance(scale, (list, tuple)) and len(scale) == 2): + raise TypeError('Scale should be a list/tuple with length of 2.' 'But got {}.'.format(scale)) + if not (isinstance(ratio, (list, tuple)) and len(ratio) == 2): + raise TypeError('Scale should be a list/tuple with length of 2.' 
'But got {}.'.format(ratio)) + + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + raise ValueError("Scale and ratio should be of kind (min, max)") + + def _get_param(image, scale, ratio): + height, width = _get_image_size(image) + area = height * width + log_ratio = tuple(math.log(x) for x in ratio) + for _ in range(10): + target_area = np.random.uniform(*scale) * area + aspect_ratio = math.exp(np.random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if 0 < w <= width and 0 < h <= height: + i = random.randint(0, height - h) + j = random.randint(0, width - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(ratio): + w = width + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = height + w = int(round(h * max(ratio))) + else: + # return whole image + w = width + h = height + i = (height - h) // 2 + j = (width - w) // 2 + return i, j, h, w + + offset_height, offset_width, target_height, target_width = _get_param(image, scale, ratio) + + image = crop(image, offset_height, offset_width, target_height, target_width) + image = resize(image, size, interpolation) + + return image + + +def random_vflip(image, prob): + + if random.random() < prob: + return vflip(image) + return image + + +def random_hflip(image, prob): + + if random.random() < prob: + return hflip(image) + return image + + +def random_rotation(image, degrees, interpolation, expand, center, fill): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number, it must be positive.' 
'But got {}'.format(degrees)) + degrees = (-degrees, degrees) + elif not (isinstance(degrees, (list, tuple)) and len(degrees) == 2): + raise ValueError('If degrees is a list/tuple, it must be length of 2.' 'But got {}'.format(degrees)) + else: + if degrees[0] > degrees[1]: + raise ValueError('if degrees is a list/tuple, it should be (min, max).') + + angle = np.random.uniform(degrees[0], degrees[1]) + + if _is_pil_image(image): + return F_pil.rotate(image, angle, interpolation, expand, center, fill) + else: + return F_cv2.rotate(image, angle, interpolation, expand, center, fill) + + +def random_shear(image, degrees, interpolation, fill): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(degrees, numbers.Number): + degrees = (-degrees, degrees, 0, 0) + elif isinstance(degrees, (list, tuple)) and (len(degrees) == 2 or len(degrees) == 4): + if len(degrees) == 2: + degrees = (degrees[0], degrees[1], 0, 0) + else: + raise ValueError( + 'degrees should be a single number or a list/tuple with length in (2 ,4).' + 'But got {}'.format(degrees) + ) + + if _is_pil_image(image): + return F_pil.random_shear(image, degrees, interpolation, fill) + else: + return F_cv2.random_shear(image, degrees, interpolation, fill) + + +def random_shift(image, shift, interpolation, fill): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if not (isinstance(shift, (tuple, list)) and len(shift) == 2): + + raise ValueError('Shift should be a list/tuple with length of 2.' 
'But got {}'.format(shift)) + + if _is_pil_image(image): + return F_pil.random_shift(image, shift, interpolation, fill) + else: + return F_cv2.random_shift(image, shift, interpolation, fill) + + +def random_zoom(image, zoom, interpolation, fill): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if not (isinstance(zoom, (tuple, list)) and len(zoom) == 2): + + raise ValueError('Zoom should be a list/tuple with length of 2.' 'But got {}'.format(zoom)) + if not (0 <= zoom[0] <= zoom[1]): + + raise ValueError('Zoom values should be positive, and zoom[1] should be greater than zoom[0].') + + if _is_pil_image(image): + return F_pil.random_zoom(image, zoom, interpolation, fill) + else: + return F_cv2.random_zoom(image, zoom, interpolation, fill) + + +def random_affine(image, degrees, shift, zoom, shear, interpolation, fill): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.random_affine(image, degrees, shift, zoom, shear, interpolation, fill) + else: + return F_cv2.random_affine(image, degrees, shift, zoom, shear, interpolation, fill) diff --git a/tensorlayer/vision/paddle_vision.py b/tensorlayer/vision/paddle_vision.py new file mode 100644 index 0000000..4c188ef --- /dev/null +++ b/tensorlayer/vision/paddle_vision.py @@ -0,0 +1,608 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import paddle +from . import functional_cv2 as F_cv2 +from . 
import functional_pil as F_pil +import sys +import math +import numbers +import warnings +import collections +import numpy as np +from PIL import Image +from numpy import sin, cos, tan +import paddle +import random + +__all__ = [ + 'central_crop', + 'to_tensor', + 'crop', + 'pad', + 'resize', + 'transpose', + 'hwc_to_chw', + 'chw_to_hwc', + 'rgb_to_hsv', + 'hsv_to_rgb', + 'rgb_to_gray', + 'adjust_brightness', + 'adjust_contrast', + 'adjust_hue', + 'adjust_saturation', + 'normalize', + 'hflip', + 'vflip', + 'padtoboundingbox', + 'standardize', + 'random_brightness', + 'random_contrast', + 'random_saturation', + 'random_hue', + 'random_crop', + 'random_resized_crop', + 'random_vflip', + 'random_hflip', + 'random_rotation', + 'random_shear', + 'random_shift', + 'random_zoom', + 'random_affine', +] + + +def _is_pil_image(img): + return isinstance(img, Image.Image) + + +def _is_tensor_image(img): + return isinstance(img, paddle.Tensor) + + +def _is_numpy_image(img): + return isinstance(img, np.ndarray) and (img.ndim in {2, 3}) + + +def to_tensor(img, data_format='HWC'): + + return paddle.vision.functional.to_tensor(img, data_format=data_format) + + +def _get_image_size(img): + if _is_pil_image(img): + return img.size[::-1] + elif _is_numpy_image(img): + return img.shape[:2] + else: + raise TypeError("Unexpected type {}".format(type(img))) + + +def random_factor(factor, name, center=1, bound=(0, float('inf')), non_negative=True): + if isinstance(factor, numbers.Number): + if factor < 0: + raise ValueError('The input value of {} cannot be negative.'.format(name)) + factor = [center - factor, center + factor] + if non_negative: + factor[0] = max(0, factor[0]) + elif isinstance(factor, (tuple, list)) and len(factor) == 2: + if not bound[0] <= factor[0] <= factor[1] <= bound[1]: + raise ValueError( + "Please check your value range of {} is valid and " + "within the bound {}.".format(name, bound) + ) + else: + raise TypeError("Input of {} should be either a single value, or a 
list/tuple of " "length 2.".format(name)) + factor = np.random.uniform(factor[0], factor[1]) + return factor + + +def central_crop(image, size=None, central_fraction=None): + + if size is None and central_fraction is None: + raise ValueError('central_fraction and size can not be both None') + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + + return F_pil.center_crop(image, size, central_fraction) + + else: + + return F_cv2.center_crop(image, size, central_fraction) + + +def crop(image, offset_height, offset_width, target_height, target_width): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + + return F_pil.crop(image, offset_height, offset_width, target_height, target_width) + + else: + + return F_cv2.crop(image, offset_height, offset_width, target_height, target_width) + + +def pad(image, padding, padding_value, mode): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.pad(image, padding, padding_value, mode) + else: + return F_cv2.pad(image, padding, padding_value, mode) + + +def resize(image, size, method): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.resize(image, size, method) + else: + return F_cv2.resize(image, size, method) + + +def transpose(image, order): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.transpose(image, order) + else: + return F_cv2.transpose(image, order) + + +def hwc_to_chw(image): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hwc_to_chw(image) + else: + return F_cv2.hwc_to_chw(image) + + +def chw_to_hwc(image): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.chw_to_hwc(image) + else: + return F_cv2.chw_to_hwc(image) + + +def rgb_to_hsv(image): + + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.rgb_to_hsv(image) + else: + return F_cv2.rgb_to_hsv(image) + + +def hsv_to_rgb(image): + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hsv_to_rgb(image) + else: + return F_cv2.hsv_to_rgb(image) + + +def rgb_to_gray(image, num_output_channels): + if not (_is_pil_image(image) or isinstance(image, np.ndarray) and (image.ndim == 3)): + raise TypeError('image should be PIL Image or ndarray with dim=3. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.rgb_to_gray(image, num_output_channels) + else: + return F_cv2.rgb_to_gray(image, num_output_channels) + + +def adjust_brightness(image, brightness_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_brightness(image, brightness_factor) + else: + return F_cv2.adjust_brightness(image, brightness_factor) + + +def adjust_contrast(image, contrast_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_contrast(image, contrast_factor) + else: + return F_cv2.adjust_contrast(image, contrast_factor) + + +def adjust_hue(image, hue_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_hue(image, hue_factor) + else: + return F_cv2.adjust_hue(image, hue_factor) + + +def adjust_saturation(image, saturation_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.adjust_saturation(image, saturation_factor) + else: + return F_cv2.adjust_saturation(image, saturation_factor) + + +def hflip(image): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.hflip(image) + else: + return F_cv2.hflip(image) + + +def vflip(image): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.vflip(image) + else: + return F_cv2.vflip(image) + + +def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value) + else: + return F_cv2.padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value) + + +def normalize(image, mean, std, data_format): + + if not _is_tensor_image(image): + if _is_pil_image(image): + image = np.asarray(image) + image = paddle.to_tensor(image) + + image = image.astype('float32') + + if data_format == 'CHW': + num_channels = image.shape[0] + elif data_format == 'HWC': + num_channels = image.shape[2] + + if isinstance(mean, numbers.Number): + mean = (mean, ) * num_channels + elif isinstance(mean, (list, tuple)): + if len(mean) != num_channels: + raise ValueError("Length of mean must be 1 or equal to the number of channels({0}).".format(num_channels)) + if isinstance(std, numbers.Number): + std = (std, ) * num_channels + elif isinstance(std, (list, tuple)): + if len(std) != num_channels: + raise ValueError("Length of std must be 1 or equal to the number of channels({0}).".format(num_channels)) + if data_format == 'CHW': + std = np.array(std).reshape((-1, 1, 1)) + mean = np.array(mean).reshape((-1, 1, 1)) + elif data_format == 'HWC': + mean = np.array(mean) + std = np.array(std) + + mean = paddle.to_tensor(mean).astype('float32') + std = paddle.to_tensor(std).astype('float32') + + return (image - mean) / std + + +def standardize(image): + ''' + Reference to tf.image.per_image_standardization(). + Linearly scales each image in image to have mean 0 and variance 1. 
+ ''' + if not _is_tensor_image(image): + if _is_pil_image(image): + image = np.asarray(image) + image = paddle.to_tensor(image) + + image = image.astype('float32') + num_pixels = paddle.to_tensor(image.size, dtype='float32') + image_mean = paddle.mean(image) + + stddev = paddle.std(image) + min_stddev = 1.0 / paddle.sqrt(num_pixels) + adjusted_stddev = paddle.maximum(stddev, min_stddev) + + return (image - image_mean) / adjusted_stddev + + +def random_brightness(image, brightness_factor): + ''' + Perform a random brightness on the input image. + Parameters + ---------- + image: + Input images to adjust random brightness + brightness_factor: + Brightness adjustment factor (default=(1, 1)). Cannot be negative. + If it is a float, the factor is uniformly chosen from the range [max(0, 1-brightness), 1+brightness]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. + ------- + + ''' + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + brightness_factor = random_factor(brightness_factor, name='brightness') + + if _is_pil_image(image): + return F_pil.adjust_brightness(image, brightness_factor) + else: + return F_cv2.adjust_brightness(image, brightness_factor) + + +def random_contrast(image, contrast_factor): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + contrast_factor = random_factor(contrast_factor, name='contrast') + + if _is_pil_image(image): + return F_pil.adjust_contrast(image, contrast_factor) + else: + return F_cv2.adjust_contrast(image, contrast_factor) + + +def random_saturation(image, saturation_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + saturation_factor = random_factor(saturation_factor, name='saturation') + + if _is_pil_image(image): + return F_pil.adjust_saturation(image, saturation_factor) + else: + return F_cv2.adjust_saturation(image, saturation_factor) + + +def random_hue(image, hue_factor): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + hue_factor = random_factor(hue_factor, name='hue', center=0, bound=(-0.5, 0.5), non_negative=False) + + if _is_pil_image(image): + return F_pil.adjust_hue(image, hue_factor) + else: + return F_cv2.adjust_hue(image, hue_factor) + + +def random_crop(image, size, padding, pad_if_needed, fill, padding_mode): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(size, int): + size = (size, size) + elif isinstance(size, (tuple, list)) and len(size) == 2: + size = size + else: + raise ValueError('Size should be a int or a list/tuple with length of 2. ' 'But got {}'.format(size)) + + if padding is not None: + + image = pad(image, padding, fill, padding_mode) + + h, w = _get_image_size(image) + + # pad the width if needed + if pad_if_needed and w < size[1]: + image = pad(image, (size[1] - w, 0), fill, padding_mode) + # pad the height if needed + if pad_if_needed and h < size[0]: + image = pad(image, (0, size[0] - h), fill, padding_mode) + + h, w = _get_image_size(image) + target_height, target_width = size + + if h < target_height or w < target_width: + raise ValueError( + 'Crop size {} should be smaller than input image size {}. 
'.format((target_height, target_width), (h, w)) + ) + + offset_height = random.randint(0, h - target_height) + offset_width = random.randint(0, w - target_width) + + return crop(image, offset_height, offset_width, target_height, target_width) + + +def random_resized_crop(image, size, scale, ratio, interpolation): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(size, int): + size = (size, size) + elif isinstance(size, (list, tuple)) and len(size) == 2: + size = size + else: + raise TypeError('Size should be a int or a list/tuple with length of 2.' 'But got {}.'.format(size)) + if not (isinstance(scale, (list, tuple)) and len(scale) == 2): + raise TypeError('Scale should be a list/tuple with length of 2.' 'But got {}.'.format(scale)) + if not (isinstance(ratio, (list, tuple)) and len(ratio) == 2): + raise TypeError('Scale should be a list/tuple with length of 2.' 
'But got {}.'.format(ratio)) + + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + raise ValueError("Scale and ratio should be of kind (min, max)") + + def _get_param(image, scale, ratio): + height, width = _get_image_size(image) + area = height * width + log_ratio = tuple(math.log(x) for x in ratio) + for _ in range(10): + target_area = np.random.uniform(*scale) * area + aspect_ratio = math.exp(np.random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if 0 < w <= width and 0 < h <= height: + i = random.randint(0, height - h) + j = random.randint(0, width - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(ratio): + w = width + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = height + w = int(round(h * max(ratio))) + else: + # return whole image + w = width + h = height + i = (height - h) // 2 + j = (width - w) // 2 + return i, j, h, w + + offset_height, offset_width, target_height, target_width = _get_param(image, scale, ratio) + + image = crop(image, offset_height, offset_width, target_height, target_width) + image = resize(image, size, interpolation) + + return image + + +def random_vflip(image, prob): + + if random.random() < prob: + return vflip(image) + return image + + +def random_hflip(image, prob): + + if random.random() < prob: + return hflip(image) + return image + + +def random_rotation(image, degrees, interpolation, expand, center, fill): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number, it must be positive.' 
'But got {}'.format(degrees)) + degrees = (-degrees, degrees) + elif not (isinstance(degrees, (list, tuple)) and len(degrees) == 2): + raise ValueError('If degrees is a list/tuple, it must be length of 2.' 'But got {}'.format(degrees)) + else: + if degrees[0] > degrees[1]: + raise ValueError('if degrees is a list/tuple, it should be (min, max).') + + angle = np.random.uniform(degrees[0], degrees[1]) + + if _is_pil_image(image): + return F_pil.rotate(image, angle, interpolation, expand, center, fill) + else: + return F_cv2.rotate(image, angle, interpolation, expand, center, fill) + + +def random_shear(image, degrees, interpolation, fill): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if isinstance(degrees, numbers.Number): + degrees = (-degrees, degrees, 0, 0) + elif isinstance(degrees, (list, tuple)) and (len(degrees) == 2 or len(degrees) == 4): + if len(degrees) == 2: + degrees = (degrees[0], degrees[1], 0, 0) + else: + raise ValueError( + 'degrees should be a single number or a list/tuple with length in (2 ,4).' + 'But got {}'.format(degrees) + ) + + if _is_pil_image(image): + return F_pil.random_shear(image, degrees, interpolation, fill) + else: + return F_cv2.random_shear(image, degrees, interpolation, fill) + + +def random_shift(image, shift, interpolation, fill): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if not (isinstance(shift, (tuple, list)) and len(shift) == 2): + + raise ValueError('Shift should be a list/tuple with length of 2.' 
'But got {}'.format(shift)) + + if _is_pil_image(image): + return F_pil.random_shift(image, shift, interpolation, fill) + else: + return F_cv2.random_shift(image, shift, interpolation, fill) + + +def random_zoom(image, zoom, interpolation, fill): + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. Got {}'.format(type(image))) + + if not (isinstance(zoom, (tuple, list)) and len(zoom) == 2): + + raise ValueError('Zoom should be a list/tuple with length of 2.' 'But got {}'.format(zoom)) + if not (0 <= zoom[0] <= zoom[1]): + + raise ValueError('Zoom values should be positive, and zoom[1] should be greater than zoom[0].') + + if _is_pil_image(image): + return F_pil.random_zoom(image, zoom, interpolation, fill) + else: + return F_cv2.random_zoom(image, zoom, interpolation, fill) + + +def random_affine(image, degrees, shift, zoom, shear, interpolation, fill): + + if not (_is_pil_image(image) or _is_numpy_image(image)): + raise TypeError('image should be PIL Image or ndarray with dim=[2 or 3]. 
Got {}'.format(type(image))) + + if _is_pil_image(image): + return F_pil.random_affine(image, degrees, shift, zoom, shear, interpolation, fill) + else: + return F_cv2.random_affine(image, degrees, shift, zoom, shear, interpolation, fill) diff --git a/tensorlayer/vision/tensorflow_vision.py b/tensorlayer/vision/tensorflow_vision.py new file mode 100644 index 0000000..a67ca0d --- /dev/null +++ b/tensorlayer/vision/tensorflow_vision.py @@ -0,0 +1,1396 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import tensorflow as tf +import numpy as np +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import array_ops, random_ops +from tensorflow.python.framework import ops +from tensorflow.python.ops.image_ops_impl import _AssertAtLeast3DImage +from tensorflow.python.framework import dtypes +from tensorflow.python.ops.image_ops_impl import convert_image_dtype +import numbers +import PIL +from PIL import Image +import math +import scipy +from scipy import ndimage +__all__ = [ + 'central_crop', + 'to_tensor', + 'crop', + 'pad', + 'resize', + 'transpose', + 'hwc_to_chw', + 'chw_to_hwc', + 'rgb_to_hsv', + 'hsv_to_rgb', + 'rgb_to_gray', + 'adjust_brightness', + 'adjust_contrast', + 'adjust_hue', + 'adjust_saturation', + 'normalize', + 'hflip', + 'vflip', + 'padtoboundingbox', + 'standardize', + 'random_brightness', + 'random_contrast', + 'random_saturation', + 'random_hue', + 'random_crop', + 'random_resized_crop', + 'random_vflip', + 'random_hflip', + 'random_rotation', + 'random_shear', + 'random_shift', + 'random_zoom', + 'random_affine', +] + + +def _is_pil_image(image): + return isinstance(image, Image.Image) + + +def _is_numpy_image(image): + return isinstance(image, np.ndarray) and (image.ndim in {2, 3}) + + +def _get_image_size(image): + image_shape = image.get_shape() + if image_shape.ndims == 3: + height, width, channels = image_shape + return height, width + elif image_shape.ndims == 4: + batch, height, width, channels = image_shape + return 
height, width + + +def random_factor(factor, name, center=1, bound=(0, float('inf')), non_negative=True): + if isinstance(factor, numbers.Number): + if factor < 0: + raise ValueError('The input value of {} cannot be negative.'.format(name)) + factor = [center - factor, center + factor] + if non_negative: + factor[0] = max(0, factor[0]) + elif isinstance(factor, (tuple, list)) and len(factor) == 2: + if not bound[0] <= factor[0] <= factor[1] <= bound[1]: + raise ValueError( + "Please check your value range of {} is valid and " + "within the bound {}.".format(name, bound) + ) + else: + raise TypeError("Input of {} should be either a single value, or a list/tuple of " "length 2.".format(name)) + factor = np.random.uniform(factor[0], factor[1]) + return factor + + +def central_crop(image, size=None, central_fraction=None): + ''' + + Parameters + ---------- + image : + input Either a 3-D float Tensor of shape [height, width, depth], + or a 4-D Tensor of shape [batch_size, height, width, depth]. + central_fraction : + float (0, 1], fraction of size to crop + size: + size (Union[int, sequence]) – The output size of the cropped image. If size is an integer, a square crop of size (size, size) is returned. + If size is a sequence of length 2, it should be (height, width). + Returns : + 3-D / 4-D float Tensor, as per the input. 
+ ------- + + ''' + if size is None and central_fraction is None: + raise ValueError('central_fraction and size can not be both None') + + if size is not None: + if not isinstance(size, (int, list, tuple)) or (isinstance(size, (list, tuple)) and len(size) != 2): + raise ValueError( + "Size should be a single integer or a list/tuple (h, w) of length 2.But" + "got {}.".format(type(size)) + ) + if isinstance(size, int): + target_height = size + target_width = size + else: + target_height = size[0] + target_width = size[1] + image = ops.convert_to_tensor(image, name='image') + rank = image.get_shape().ndims + if rank != 3 and rank != 4: + raise ValueError( + '`image` should either be a Tensor with rank = 3 or ' + 'rank = 4. Had rank = {}.'.format(rank) + ) + + def _get_dim(tensor, idx): + static_shape = tensor.get_shape().dims[idx].value + if static_shape is not None: + return static_shape, False + return array_ops.shape(tensor)[idx], True + + if rank == 3: + img_h, dynamic_h = _get_dim(image, 0) + img_w, dynamic_w = _get_dim(image, 1) + img_d = image.get_shape()[2] + else: + img_bs = image.get_shape()[0] + img_h, dynamic_h = _get_dim(image, 1) + img_w, dynamic_w = _get_dim(image, 2) + img_d = image.get_shape()[3] + + bbox_h_size = target_height + bbox_w_size = target_width + + if dynamic_h: + img_hd = math_ops.cast(img_h, dtypes.float64) + target_height = math_ops.cast(target_height, dtypes.float64) + bbox_h_start = math_ops.cast((img_hd - target_height) / 2, dtypes.int32) + else: + img_hd = float(img_h) + target_height = float(target_height) + bbox_h_start = int((img_hd - target_height) / 2) + + if dynamic_w: + img_wd = math_ops.cast(img_w, dtypes.float64) + target_width = math_ops.cast(target_width, dtypes.float64) + bbox_w_start = math_ops.cast((img_wd - target_width) / 2, dtypes.int32) + else: + img_wd = float(img_w) + target_width = float(target_width) + bbox_w_start = int((img_wd - target_width) / 2) + + if rank == 3: + bbox_begin = 
array_ops.stack([bbox_h_start, bbox_w_start, 0]) + bbox_size = array_ops.stack([bbox_h_size, bbox_w_size, -1]) + else: + bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0]) + bbox_size = array_ops.stack([-1, bbox_h_size, bbox_w_size, -1]) + + image = array_ops.slice(image, bbox_begin, bbox_size) + + if rank == 3: + image.set_shape([None if dynamic_h else bbox_h_size, None if dynamic_w else bbox_w_size, img_d]) + else: + image.set_shape([img_bs, None if dynamic_h else bbox_h_size, None if dynamic_w else bbox_w_size, img_d]) + return image + + elif central_fraction is not None: + return tf.image.central_crop(image, central_fraction) + + +def to_tensor(img, data_format): + '''Converts a ``image`` to tf.Tensor. + + Parameters + ---------- + img: + Image to be converted to tensor. + data_format: + Data format of output tensor, should be 'HWC' or + 'CHW'. Default: 'HWC'. + + Returns: + Tensor: Converted image. + ------- + + ''' + if not (_is_pil_image(img) or _is_numpy_image(img)): + raise TypeError('img should be PIL Image or ndarray. But got {}'.format(type(img))) + + if _is_pil_image(img): + # PIL Image + if img.mode == 'I': + image = tf.convert_to_tensor(np.array(img, np.int32, copy=False)) + elif img.mode == 'I;16': + # cast and reshape not support int16 + image = tf.convert_to_tensor(np.array(img, np.int32, copy=False)) + elif img.mode == 'F': + image = tf.convert_to_tensor(np.array(img, np.float32, copy=False)) + elif img.mode == '1': + image = 255 * tf.convert_to_tensor(np.array(img, np.uint8, copy=False)) + else: + image = tf.convert_to_tensor(np.array(img, copy=False)) + + if img.mode == 'YCbCr': + nchannel = 3 + elif img.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(img.mode) + + dtype = image.dtype + if dtype == 'tf.uint8': + image = tf.cast(image, tf.float32) / 255. 
+ + image = tf.reshape(image, shape=[img.size[1], img.size[0], nchannel]) + if data_format == 'CHW': + image = tf.transpose(image, perm=[2, 0, 1]) + return image + else: + if img.ndim == 2: + img = img[:, :, None] + + if data_format == 'CHW': + img = tf.convert_to_tensor(img.transpose((2, 0, 1))) + else: + img = tf.convert_to_tensor(img) + + dtype = img.dtype + if dtype == 'tf.uint8': + img = tf.cast(img, tf.float32) / 255. + return img + + +def crop(image, offset_height, offset_width, target_height, target_width): + + return tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width) + + +def pad(image, padding, padding_value, mode): + ''' + + Parameters + ---------- + image: + A 3-D or 4-D Tensor. + padding: + An integer or a list/tuple. If a single number is provided, pad all borders with this value. + If a tuple or list of 2 values is provided, pad the left and right with the first value and the top and bottom with the second value. + If 4 values are provided as a list or tuple, pad the (left , top, right, bottom) respectively. + padding_value: + In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor. + mode: + One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive) + Returns: + A padded Tensor. Has the same type as tensor. 
+ ------- + + ''' + image = ops.convert_to_tensor(image, name='image') + image_shape = image.get_shape() + if len(image_shape) == 3: + batch_size = 0 + elif len(image_shape) == 4: + batch_size = image_shape[0] + else: + raise TypeError('Image must be a 3-D tensor or 4-D tensor.') + + if isinstance(padding, int): + padding = ((padding, padding), (padding, padding)) + elif isinstance(padding, list) or isinstance(padding, tuple): + if len(padding) == 2: + padding = ((padding[1], padding[1]), (padding[0], padding[0])) + elif len(padding) == 4: + padding = ((padding[1], padding[3]), (padding[0], padding[2])) + else: + raise ValueError('The length of padding should be 2 or 4, but got {}.'.format(len(padding))) + else: + raise TypeError('Padding should be an integer or a list/tuple, but got {}.'.format(type(padding))) + + if batch_size == 0: + padding = (padding[0], padding[1], (0, 0)) + else: + padding = ((0, 0), padding[0], padding[1], (0, 0)) + + return tf.pad(image, padding, mode=mode, constant_values=padding_value) + + +def resize(image, size, method): + ''' + + Parameters + ---------- + images: + Input images to resize + size: + The output size of the resized image. + If size is an integer, smaller edge of the image will be resized to this value with + the same image aspect ratio. + If size is a sequence of (height, width), this will be the desired output size. + method: + An image.ResizeMethod, or string equivalent shoulid be in + (bilinear, lanczos3, lanczos5, bicubic, gaussian, nearest, area, mitchellcubic). + Defaults to bilinear. + preserve_aspect_ratio: + Whether to preserve the aspect ratio. + Returns: + resized images + ------- + + ''' + if not (isinstance(size, int) or (isinstance(size, (list, tuple)) and len(size) == 2)): + raise TypeError('Size should be a single number or a list/tuple (h, w) of length 2.' 
'Got {}.'.format(size)) + image = ops.convert_to_tensor(image) + orig_dtype = image.dtype + if orig_dtype not in [dtypes.float16, dtypes.float32]: + image = convert_image_dtype(image, dtypes.float32) + + if image.get_shape().ndims == 3: + h, w, _ = image.get_shape().as_list() + elif image.get_shape().ndims == 4: + _, h, w, _ = image.get_shape().as_list() + + if isinstance(size, int): + if (w <= h and w == size) or (h <= w and h == size): + size = (h, w) + if w < h: + target_w = size + target_h = int(size * h / w) + size = (target_h, target_w) + else: + target_h = size + target_w = int(size * w / h) + size = (target_h, target_w) + image = tf.image.resize(image, size, method, preserve_aspect_ratio=False) + return convert_image_dtype(image, orig_dtype, saturate=True) + + +def transpose(image, order): + image = ops.convert_to_tensor(image) + shape = image.get_shape() + if shape.ndims == 3 or shape.ndims is None: + if len(order) != 3: + raise ValueError('if image is 3-D tensor, order should be a list/tuple with length of 3') + return array_ops.transpose(image, order) + elif shape.ndims == 4: + if len(order) != 4: + raise ValueError('if image is 4-D tensor, order should be a list/tuple with length of 4') + return array_ops.transpose(image, order) + else: + raise ValueError('\'image\' must have either 3 or 4 dimensions.') + + +def hwc_to_chw(image): + + if (len(image.shape) == 3): + return transpose(image, (2, 0, 1)) + elif (len(image.shape) == 4): + return transpose(image, (0, 3, 1, 2)) + else: + raise ValueError('\'image\' must have either 3 or 4 dimensions.') + + +def chw_to_hwc(image): + + if (len(image.shape) == 3): + return transpose(image, (1, 2, 0)) + elif (len(image.shape) == 4): + return transpose(image, (0, 2, 3, 1)) + else: + raise ValueError('\'image\' must have either 3 or 4 dimensions.') + + +def rgb_to_hsv(image): + + return tf.image.rgb_to_hsv(image) + + +def hsv_to_rgb(image): + + return tf.image.hsv_to_rgb(image) + + +def rgb_to_gray(image, 
num_output_channels): + + if num_output_channels not in (1, 3): + raise ValueError('num_output_channels should be either 1 or 3') + + image = ops.convert_to_tensor(image, name='image') + orig_dtype = image.dtype + flt_image = convert_image_dtype(image, dtypes.float32) + rgb_weights = [0.2989, 0.5870, 0.1140] + gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1]) + gray_float = array_ops.expand_dims(gray_float, -1) + if num_output_channels == 3: + gray_float = array_ops.stack([gray_float, gray_float, gray_float], axis=2) + return convert_image_dtype(gray_float, orig_dtype) + + +def adjust_brightness(image, brightness_factor): + ''' + Parameters + ---------- + images: + Input images to adjust brightness + brightness_factor(float): How much to adjust the brightness. Can be + any non negative number. 0 gives a black image, 1 gives the + original image while 2 increases the brightness by a factor of 2. + Returns: + adjusted images + ------- + ''' + if brightness_factor < 0: + raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor)) + + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + + orig_dtype = image.dtype + if orig_dtype not in [dtypes.float16, dtypes.float32]: + image = convert_image_dtype(image, dtypes.float32) + + brightness_factor = math_ops.cast(brightness_factor, image.dtype) + image_zeros = tf.zeros_like(image) + adjusted = brightness_factor * image + (1.0 - brightness_factor) * image_zeros + adjusted = tf.clip_by_value(adjusted, clip_value_min=0, clip_value_max=1.0) + return convert_image_dtype(adjusted, orig_dtype, saturate=True) + + +def adjust_contrast(image, contrast_factor): + ''' + Parameters + ---------- + images: + Input images to adjust contrast + contrast_factor(float): How much to adjust the contrast. Can be + any non negative number. 0 gives a gray image, 1 gives the + original image while 2 increases the contrast by a factor of 2. 
+ Returns: + adjusted images + ------- + ''' + if contrast_factor < 0: + raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor)) + + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + + orig_dtype = image.dtype + if orig_dtype not in [dtypes.float16, dtypes.float32]: + image = convert_image_dtype(image, dtypes.float32) + + contrast_factor = math_ops.cast(contrast_factor, image.dtype) + mean = tf.math.reduce_mean(tf.image.rgb_to_grayscale(image), keepdims=True) + adjusted = contrast_factor * image + (1 - contrast_factor) * mean + adjusted = tf.clip_by_value(adjusted, clip_value_min=0, clip_value_max=1.0) + return convert_image_dtype(adjusted, orig_dtype, saturate=True) + + +def adjust_hue(image, hue_factor): + ''' + Parameters + ---------- + images(Tensor): + Input images to adjust hue + hue_factor(float): How much to shift the hue channel. Should be in + [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in + HSV space in positive and negative direction respectively. + 0 means no shift. Therefore, both -0.5 and 0.5 will give an image + with complementary colors while 0 gives the original image. 
+ Returns(Tensor): + Adjusted images + ------- + ''' + if not (-0.5 <= hue_factor <= 0.5): + raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor)) + + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + + orig_dtype = image.dtype + if orig_dtype not in [dtypes.float16, dtypes.float32]: + image = convert_image_dtype(image, dtypes.float32) + + hue_factor = math_ops.cast(hue_factor, image.dtype) + image = tf.image.rgb_to_hsv(image) + h, s, v = tf.split(image, num_or_size_splits=[1, 1, 1], axis=2) + h = (h + hue_factor) % 1.0 + image = tf.concat((h, s, v), axis=2) + adjusted = tf.image.hsv_to_rgb(image) + + return convert_image_dtype(adjusted, orig_dtype, saturate=True) + + +def adjust_saturation(image, saturation_factor): + ''' + Parameters + ---------- + images(Tensor): + Input images to adjust saturation + contrast_factor(float): How much to adjust the saturation. 0 will + give a black and white image, 1 will give the original image while + 2 will enhance the saturation by a factor of 2. 
+ Returns(Tensor): + Adjusted images + ------- + ''' + if saturation_factor < 0: + raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor)) + + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + + orig_dtype = image.dtype + if orig_dtype not in [dtypes.float16, dtypes.float32]: + image = convert_image_dtype(image, dtypes.float32) + + saturation_factor = math_ops.cast(saturation_factor, image.dtype) + gray_image = tf.image.rgb_to_grayscale(image) + adjusted = saturation_factor * image + (1 - saturation_factor) * gray_image + adjusted = tf.clip_by_value(adjusted, clip_value_min=0, clip_value_max=1.0) + return convert_image_dtype(adjusted, orig_dtype, saturate=True) + + +def hflip(image): + ''' + + Parameters + ---------- + image(Tensor): + Input images to flip an image horizontally (left to right) + + Returns(Tensor): + Flipped images + ------- + + ''' + return tf.image.flip_left_right(image) + + +def vflip(image): + ''' + + Parameters + ---------- + image(Tensor): + Input images to flip an image vertically (up to down) + + Returns(Tensor): + Flipped images + ------- + + ''' + return tf.image.flip_up_down(image) + + +def padtoboundingbox(image, offset_height, offset_width, target_height, target_width, padding_value): + ''' + + Parameters + ---------- + image: + 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor + of shape `[height, width, channels]`. + offset_height: + Number of rows of padding_values to add on top. + offset_width: + Number of columns of padding_values to add on the left. + target_height: + Height of output image. + target_width: + Width of output image. 
+ padding_value: + value to pad + + Returns: + If `image` was 4-D, a 4-D float Tensor of shape + `[batch, target_height, target_width, channels]` + If `image` was 3-D, a 3-D float Tensor of shape + `[target_height, target_width, channels]` + ------- + + ''' + image = ops.convert_to_tensor(image, name='image') + + if offset_height < 0: + raise ValueError('offset_height must be >= 0') + if offset_width < 0: + raise ValueError('offset_width must be >= 0') + + image_shape = image.get_shape() + if image_shape.ndims == 3: + height, width, channels = image.get_shape() + elif image_shape.ndims == 4: + batch, height, width, channels = image.get_shape() + else: + raise ValueError('\'image\' (shape %s) must have either 3 or 4 dimensions.' % image_shape) + + after_padding_width = target_width - offset_width - width + after_padding_height = target_height - offset_height - height + if after_padding_height < 0: + raise ValueError('image height must be <= target - offset') + if after_padding_width < 0: + raise ValueError('image width must be <= target - offset') + + return pad( + image, padding=(offset_width, offset_height, after_padding_width, after_padding_height), + padding_value=padding_value, mode='constant' + ) + + +def normalize(image, mean, std, data_format): + ''' + Parameters + ---------- + image: + An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. + mean: + List or tuple of mean values for each channel, with respect to channel order. + std: + List or tuple of standard deviations for each channel. + channel_mode: + Decide to implement standardization on whole image or each channel of image. + Returns: + A Tensor with the same shape and dtype as image. 
+ ------- + ''' + image = ops.convert_to_tensor(image, name='image') + image = math_ops.cast(image, dtype=tf.float32) + image = _AssertAtLeast3DImage(image) + + if data_format == 'CHW': + num_channels = image.shape[0] + elif data_format == 'HWC': + num_channels = image.shape[2] + + if isinstance(mean, numbers.Number): + mean = (mean, ) * num_channels + elif isinstance(mean, (list, tuple)): + if len(mean) != num_channels: + raise ValueError("Length of mean must be 1 or equal to the number of channels({0}).".format(num_channels)) + if isinstance(std, numbers.Number): + std = (std, ) * num_channels + elif isinstance(std, (list, tuple)): + if len(std) != num_channels: + raise ValueError("Length of std must be 1 or equal to the number of channels({0}).".format(num_channels)) + + if data_format == 'CHW': + std = np.float32(np.array(std).reshape((-1, 1, 1))) + mean = np.float32(np.array(mean).reshape((-1, 1, 1))) + elif data_format == 'HWC': + mean = np.float32(np.array(mean).reshape((1, 1, -1))) + std = np.float32(np.array(std).reshape((1, 1, -1))) + + mean = ops.convert_to_tensor(mean) + mean = math_ops.cast(mean, dtype=tf.float32) + std = ops.convert_to_tensor(std) + std = math_ops.cast(std, dtype=tf.float32) + image -= mean + image = math_ops.divide(image, std) + return image + + +def standardize(image): + ''' + Reference to tf.image.per_image_standardization(). + Linearly scales each image in image to have mean 0 and variance 1. + + Parameters + ---------- + image: + An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. + + Returns: + A Tensor with the same shape as image and its dtype is float32. + ------- + + ''' + image = ops.convert_to_tensor(image, name='image') + image = math_ops.cast(image, dtype=tf.float32) + return tf.image.per_image_standardization(image) + + +def random_brightness(image, brightness_factor): + ''' + Perform a random brightness on the input image. 
+ Parameters + ---------- + image: + Input images to adjust random brightness + brightness_factor: + Brightness adjustment factor (default=(1, 1)). Cannot be negative. + If it is a float, the factor is uniformly chosen from the range [max(0, 1-brightness), 1+brightness]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. + ------- + + ''' + brightness_factor = random_factor(brightness_factor, name='brightness') + + return adjust_brightness(image, brightness_factor) + + +def random_contrast(image, contrast_factor): + ''' + Perform a random contrast on the input image. + Parameters + ---------- + image: + Input images to adjust random contrast + contrast_factor: + Contrast adjustment factor (default=(1, 1)). Cannot be negative. + If it is a float, the factor is uniformly chosen from the range [max(0, 1-contrast), 1+contrast]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. + ------- + + ''' + contrast_factor = random_factor(contrast_factor, name='contrast') + + return adjust_contrast(image, contrast_factor) + + +def random_saturation(image, saturation_factor): + ''' + Perform a random saturation on the input image. + Parameters + ---------- + image: + Input images to adjust random saturation + saturation_factor: + Saturation adjustment factor (default=(1, 1)). Cannot be negative. + If it is a float, the factor is uniformly chosen from the range [max(0, 1-saturation), 1+saturation]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. + ------- + + ''' + + saturation_factor = random_factor(saturation_factor, name='saturation') + + return adjust_saturation(image, saturation_factor) + + +def random_hue(image, hue_factor): + ''' + Perform a random contrast on the input image. + Parameters + ---------- + image: + Input images to adjust random contrast + brightness_factor: + Contrast adjustment factor (default=(1, 1)). Cannot be negative. 
+ If it is a float, the factor is uniformly chosen from the range [max(0, 1-contrast), 1+contrast]. + If it is a sequence, it should be [min, max] for the range. + + Returns: + Adjusted image. + ------- + + ''' + hue_factor = random_factor(hue_factor, name='hue', center=0, bound=(-0.5, 0.5), non_negative=False) + + return adjust_hue(image, hue_factor) + + +def random_crop(image, size, padding, pad_if_needed, fill, padding_mode): + ''' + + Parameters + ---------- + image: + Input images to crop and pad if needed. + size: + Desired output size of the crop. If size is an int instead of sequence like (h, w), + a square crop (size, size) is made. If provided a sequence of length 1, + it will be interpreted as (size[0], size[0]). + padding: + Optional, padding on each border of the image. Default is None. + If a single int is provided this is used to pad all borders. + If sequence of length 2 is provided this is the padding on left/right and top/bottom respectively. + If a sequence of length 4 is provided this is the padding for the left, top, right and bottom borders respectively. + pad_if_needed: + It will pad the image if smaller than the desired size to avoid raising an exception. + Since cropping is done after padding, the padding seems to be done at a random offset. + fill: + Pixel fill value for constant fill. Default is 0. + padding_mode: + Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant. + + Returns: + cropped images. + ------- + + ''' + image = ops.convert_to_tensor(image, name='image') + _AssertAtLeast3DImage(image) + + if isinstance(size, int): + size = (size, size) + elif isinstance(size, (tuple, list)) and len(size) == 2: + size = size + else: + raise ValueError('Size should be a int or a list/tuple with length of 2. 
' 'But got {}'.format(size)) + + size = ops.convert_to_tensor(size, dtype=dtypes.int32, name='size') + if padding is not None: + image = pad(image, padding, fill, padding_mode) + + image_shape = image.get_shape() + if image_shape.ndims == 3: + height, width, channels = image_shape + elif image_shape.ndims == 4: + batch, height, width, channels = image_shape + + if pad_if_needed and height < size[0]: + image = pad(image, (0, size[0] - height), fill, padding_mode) + if pad_if_needed and width < size[1]: + image = pad(image, (size[1] - width, 0), fill, padding_mode) + + image_shape = image.get_shape() + if image_shape.ndims == 3: + height, width, channels = image_shape + elif image_shape.ndims == 4: + batch, height, width, channels = image_shape + + target_height, target_width = size + if height < target_height or width < target_width: + raise ValueError( + 'Crop size {} should be smaller than input image size {}. '.format( + (target_height, target_width), (height, width) + ) + ) + + if target_height == height and target_width == width: + return crop(image, 0, 0, target_height, target_width) + + offset_height = random_ops.random_uniform([], minval=0, maxval=height - target_height + 1, dtype=size.dtype) + + offset_width = random_ops.random_uniform([], minval=0, maxval=width - target_width + 1, dtype=size.dtype) + + return crop(image, offset_height, offset_width, target_height, target_width) + + +def random_resized_crop(image, size, scale, ratio, interpolation): + '''Crop the given image to random size and aspect ratio. + + Parameters + ---------- + image: + 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. + size: + Target size of output image, with (height, width) shape. if size is int, target size will be (size, size). + scale: + Range of size of the origin size cropped. Default: (0.08, 1.0) + ratio: + Range of aspect ratio of the origin aspect ratio cropped. 
Default: (0.75, 1.33) + interpolation: + Interpolation method. Default: 'bilinear'. + + Returns: + Randomly cropped and resized image. + ------- + + ''' + + if isinstance(size, int): + size = (size, size) + elif isinstance(size, (list, tuple)) and len(size) == 2: + size = size + else: + raise TypeError('Size should be a int or a list/tuple with length of 2.' 'But got {}.'.format(size)) + if not (isinstance(scale, (list, tuple)) and len(scale) == 2): + raise TypeError('Scale should be a list/tuple with length of 2.' 'But got {}.'.format(scale)) + if not (isinstance(ratio, (list, tuple)) and len(ratio) == 2): + raise TypeError('Scale should be a list/tuple with length of 2.' 'But got {}.'.format(ratio)) + + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + raise ValueError("Scale and ratio should be of kind (min, max)") + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + + def get_param(image, scale, ratio): + height, width = _get_image_size(image) + area = math_ops.cast(height * width, dtype=dtypes.float32) + ratio = ops.convert_to_tensor(ratio, dtype=dtypes.float32) + log_ratio = math_ops.log(ratio) + for _ in range(10): + target_area = area * random_ops.random_uniform([], minval=scale[0], maxval=scale[1], dtype=dtypes.float32) + aspect_ratio = math_ops.exp( + random_ops.random_uniform([], minval=log_ratio[0], maxval=log_ratio[1], dtype=dtypes.float32) + ) + + target_width = math_ops.to_int32(math_ops.round(math_ops.sqrt(target_area * aspect_ratio))) + + target_height = math_ops.to_int32(math_ops.round(math_ops.sqrt(target_area / aspect_ratio))) + + if 0 < target_width <= width and 0 < target_height <= height: + offset_height = random_ops.random_uniform( + [], minval=0, maxval=height - target_height + 1, dtype=dtypes.int32 + ) + + offset_width = random_ops.random_uniform( + [], minval=0, maxval=width - target_width + 1, dtype=dtypes.int32 + ) + + return offset_height, offset_width, target_height, target_width + + height 
= ops.convert_to_tensor(height, dtype=dtypes.float32) + width = ops.convert_to_tensor(width, dtype=dtypes.float32) + in_ratio = width / height + if in_ratio < ratio[0]: + target_width = width + target_height = math_ops.to_int32(math_ops.round(target_width / ratio[0])) + elif in_ratio > ratio[1]: + target_height = height + target_width = math_ops.to_int32(math_ops.round(target_height / ratio[1])) + else: + target_height = height + target_width = width + offset_height = (height - target_height) // 2 + offset_width = (width - target_width) // 2 + return offset_height, offset_width, target_height, target_width + + offset_height, offset_width, target_heigth, target_width = get_param(image, scale, ratio) + image = crop(image, offset_height, offset_width, target_heigth, target_width) + image = resize(image, size, interpolation) + return image + + +def random_vflip(image, prob): + '''Vertically flip the input image randomly with a given probability. + + Parameters + ---------- + image: + 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. + prob: + probability of the image being flipped. Default value is 0.5 + Returns: + A tensor of the same type and shape as image. + ------- + + ''' + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + random_prob = random_ops.random_uniform([], minval=0, maxval=1.0, dtype=dtypes.float32) + flip_flag = math_ops.less(random_prob, prob) + if flip_flag: + return vflip(image) + return image + + +def random_hflip(image, prob): + '''horizontally flip the input image randomly with a given probability. + + Parameters + ---------- + image: + 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. + prob: + probability of the image being flipped. Default value is 0.5 + Returns: + A tensor of the same type and shape as image. 
+ ------- + + ''' + image = ops.convert_to_tensor(image, name='image') + image = _AssertAtLeast3DImage(image) + random_prob = random_ops.random_uniform([], minval=0, maxval=1.0, dtype=dtypes.float32) + flip_flag = math_ops.less(random_prob, prob) + if flip_flag: + return hflip(image) + return image + + +def random_rotation(image, degrees, interpolation, expand, center, fill): + '''Rotate the image by angle. + + Parameters + ---------- + image: + Input tensor. Must be 3D. + degrees: + Range of degrees to select from.If degrees is a number instead of sequence like (min, max), the range of degrees + will be (-degrees, +degrees). + interpolation: + Points outside the boundaries of the input are filled according to the given mode + (one of {'nearest', 'bilinear'}). + expand: + Optional expansion flag. + If true, expands the output to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center: + Optional center of rotation, (x, y). Origin is the upper left corner. + Default is the center of the image. + fill: + Pixel fill value for the area outside the rotated image. + Default is ``0``. If given a number, the value is used for all bands respectively. + + Returns: + Rotated image tensor. + ------- + + ''' + if isinstance(image, (tf.Tensor, np.ndarray)) and len(image.shape) == 3: + image = np.asarray(image) + else: + 'Image should be a 3d tensor or np.ndarray.' + h, w, c = image.shape[0], image.shape[1], image.shape[2] + + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number, it must be positive.' 'But got {}'.format(degrees)) + degrees = (-degrees, degrees) + elif not (isinstance(degrees, (list, tuple)) and len(degrees) == 2): + raise ValueError('If degrees is a list/tuple, it must be length of 2.' 
'But got {}'.format(degrees)) + else: + if degrees[0] > degrees[1]: + raise ValueError('if degrees is a list/tuple, it should be (min, max).') + + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + + if interpolation not in ('nearest', 'bilinear'): + raise ValueError('Interpolation only support {\'nearest\', \'bilinear\'} .') + + orig_dtype = image.dtype + image = np.asarray(image, dtype=np.float) + theta = np.random.uniform(degrees[0], degrees[1]) + angle = -math.radians(theta) + rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) + + if center is None: + rotn_center = (w / 2.0, h / 2.0) + else: + rotn_center = center + + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform(-rotn_center[0] - 0, -rotn_center[1] - 0, matrix) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + + if expand: + # calculate output size + xx = [] + yy = [] + for x, y in ((0, 0), (w, 0), (w, h), (0, h)): + x, y = transform(x, y, matrix) + xx.append(x) + yy.append(y) + nw = math.ceil(max(xx)) - math.floor(min(xx)) + nh = math.ceil(max(yy)) - math.floor(min(yy)) + matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix) + w, h = nw, nh + + image = np.rollaxis(image, 2, 0) + dummy = np.ones((1, image.shape[1], image.shape[2]), dtype=image.dtype) + image = np.concatenate((image, dummy), axis=0) + final_offset = np.array([matrix[5], matrix[2]]) + + channel_images = [ + ndimage.interpolation.affine_transform( + x_channel, rotation_matrix, final_offset, output_shape=(h, w), 
order=3, mode='constant', cval=0 + ) for x_channel in image + ] + image = np.stack(channel_images, axis=0) + image = np.rollaxis(image, 0, 3) + mask = image[:, :, -1:] + image = image[:, :, :-1] + mask = np.tile(mask, (1, 1, image.shape[2])) + fill = np.tile(fill, (image.shape[0], image.shape[1], 1)) + if interpolation == 'nearest': + mask = mask < 0.5 + image[mask] = fill[mask] + else: + image = image * mask + (1.0 - mask) * fill + image = np.asarray(image, dtype=orig_dtype) + image = ops.convert_to_tensor(image) + return image + + +def transform_matrix_offset_center(matrix, x, y): + o_x = float(x) / 2 + 0.5 + o_y = float(y) / 2 + 0.5 + offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]]) + reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]]) + transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix) + return transform_matrix + + +def random_shear(image, degrees, interpolation, fill): + + if isinstance(image, (tf.Tensor, np.ndarray)) and len(image.shape) == 3: + image = np.asarray(image) + else: + 'Image should be a 3d tensor or np.ndarray.' + h, w, c = image.shape[0], image.shape[1], image.shape[2] + + if interpolation not in ('nearest', 'bilinear'): + raise ValueError('Interpolation only support {\'nearest\', \'bilinear\'} .') + + if isinstance(degrees, numbers.Number): + degrees = (-degrees, degrees, 0, 0) + elif isinstance(degrees, (list, tuple)) and (len(degrees) == 2 or len(degrees) == 4): + if len(degrees) == 2: + degrees = (degrees[0], degrees[1], 0, 0) + else: + raise ValueError( + 'degrees should be a single number or a list/tuple with length in (2 ,4).' + 'But got {}'.format(degrees) + ) + + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' 
+ 'But got {}'.format(fill) + ) + + orig_dtype = image.dtype + image = np.asarray(image, dtype=np.float) + shear = [np.random.uniform(degrees[0], degrees[1]), np.random.uniform(degrees[2], degrees[3])] + shear = np.deg2rad(shear) + shear_matrix = np.array( + [[math.cos(shear[1]), math.sin(shear[1]), 0], [math.sin(shear[0]), math.cos(shear[0]), 0], [0, 0, 1]] + ) + transform_matrix = shear_matrix + transform_matrix = transform_matrix_offset_center(transform_matrix, h, w) + + shear_matrix = transform_matrix[:2, :2] + offset = transform_matrix[:2, 2] + image = np.rollaxis(image, 2, 0) + + dummy = np.ones((1, image.shape[1], image.shape[2]), dtype=image.dtype) + image = np.concatenate((image, dummy), axis=0) + + channel_images = [ + ndimage.interpolation.affine_transform(x_channel, shear_matrix, offset, order=3, mode='constant', cval=0) + for x_channel in image + ] + + image = np.stack(channel_images, axis=0) + image = np.rollaxis(image, 0, 3) + mask = image[:, :, -1:] + image = image[:, :, :-1] + mask = np.tile(mask, (1, 1, c)) + fill = np.tile(fill, (h, w, 1)) + if interpolation == 'nearest': + mask = mask < 0.5 + image[mask] = fill[mask] + else: + image = image * mask + (1.0 - mask) * fill + image = np.asarray(image, dtype=orig_dtype) + image = ops.convert_to_tensor(image) + return image + + +def random_shift(image, shift, interpolation, fill): + + if isinstance(image, (tf.Tensor, np.ndarray)) and len(image.shape) == 3: + image = np.asarray(image) + else: + 'Image should be a 3d tensor or np.ndarray.' + h, w, c = image.shape[0], image.shape[1], image.shape[2] + + if interpolation not in ('nearest', 'bilinear'): + raise ValueError('Interpolation only support {\'nearest\', \'bilinear\'} .') + + if not (isinstance(shift, (tuple, list)) and len(shift) == 2): + + raise ValueError('Shift should be a list/tuple with length of 2.' 
'But got {}'.format(shift)) + + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + + orig_dtype = image.dtype + image = np.asarray(image, dtype=np.float) + hrg = shift[0] + wrg = shift[1] + tx = -np.random.uniform(-hrg, hrg) * w + ty = -np.random.uniform(-wrg, wrg) * h + + shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) + + transform_matrix = transform_matrix_offset_center(shift_matrix, h, w) + shift_matrix = transform_matrix[:2, :2] + offset = transform_matrix[:2, 2] + image = np.rollaxis(image, 2, 0) + + dummy = np.ones((1, image.shape[1], image.shape[2]), dtype=image.dtype) + image = np.concatenate((image, dummy), axis=0) + + channel_images = [ + ndimage.interpolation.affine_transform(x_channel, shift_matrix, offset, order=3, mode='constant', cval=0) + for x_channel in image + ] + + image = np.stack(channel_images, axis=0) + image = np.rollaxis(image, 0, 3) + mask = image[:, :, -1:] + image = image[:, :, :-1] + mask = np.tile(mask, (1, 1, c)) + fill = np.tile(fill, (h, w, 1)) + if interpolation == 'nearest': + mask = mask < 0.5 + image[mask] = fill[mask] + else: + image = image * mask + (1.0 - mask) * fill + image = np.asarray(image, dtype=orig_dtype) + image = ops.convert_to_tensor(image) + return image + + +def random_zoom(image, zoom, interpolation, fill): + + if isinstance(image, (tf.Tensor, np.ndarray)) and len(image.shape) == 3: + image = np.asarray(image) + else: + 'Image should be a 3d tensor or np.ndarray.' + h, w, c = image.shape[0], image.shape[1], image.shape[2] + + if interpolation not in ('nearest', 'bilinear'): + raise ValueError('Interpolation only support {\'nearest\', \'bilinear\'} .') + + if not (isinstance(zoom, (tuple, list)) and len(zoom) == 2): + + raise ValueError('Zoom should be a list/tuple with length of 2.' 
'But got {}'.format(zoom)) + if not (0 <= zoom[0] <= zoom[1]): + + raise ValueError('Zoom values should be positive, and zoom[1] should be greater than zoom[0].') + + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + + orig_dtype = image.dtype + image = np.asarray(image, dtype=np.float) + zoom_factor = 1 / np.random.uniform(zoom[0], zoom[1]) + zoom_matrix = np.array([[zoom_factor, 0, 0], [0, zoom_factor, 0], [0, 0, 1]]) + transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w) + zoom_matrix = transform_matrix[:2, :2] + offset = transform_matrix[:2, 2] + + image = np.rollaxis(image, 2, 0) + + dummy = np.ones((1, image.shape[1], image.shape[2]), dtype=image.dtype) + image = np.concatenate((image, dummy), axis=0) + + channel_images = [ + ndimage.interpolation.affine_transform(x_channel, zoom_matrix, offset, order=3, mode='constant', cval=0) + for x_channel in image + ] + + image = np.stack(channel_images, axis=0) + image = np.rollaxis(image, 0, 3) + mask = image[:, :, -1:] + image = image[:, :, :-1] + mask = np.tile(mask, (1, 1, c)) + fill = np.tile(fill, (h, w, 1)) + if interpolation == 'nearest': + mask = mask < 0.5 + image[mask] = fill[mask] + else: + image = image * mask + (1.0 - mask) * fill + image = np.asarray(image, dtype=orig_dtype) + image = ops.convert_to_tensor(image) + return image + + +def random_affine(image, degrees, shift, zoom, shear, interpolation, fill): + + if isinstance(image, (tf.Tensor, np.ndarray)) and len(image.shape) == 3: + image = np.asarray(image) + else: + 'Image should be a 3d tensor or np.ndarray.' 
+ h, w, c = image.shape[0], image.shape[1], image.shape[2] + + if isinstance(fill, numbers.Number): + fill = (fill, ) * c + elif not (isinstance(fill, (list, tuple)) and len(fill) == c): + raise ValueError( + 'If fill should be a single number or a list/tuple with length of image channels.' + 'But got {}'.format(fill) + ) + orig_dtype = image.dtype + image = np.asarray(image, dtype=np.float) + theta = np.random.uniform(degrees[0], degrees[1]) + theta = np.deg2rad(theta) + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) + transform_matrix = rotation_matrix + + if shift is not None: + max_dx = float(shift[0] * w) + max_dy = float(shift[1] * h) + tx = -int(round(np.random.uniform(-max_dx, max_dx))) + ty = -int(round(np.random.uniform(-max_dy, max_dy))) + shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) + transform_matrix = np.dot(transform_matrix, shift_matrix) + + if shear is not None: + shear_x = shear_y = 0 + shear_x = float(np.random.uniform(shear[0], shear[1])) + if len(shear) == 4: + shear_y = float(np.random.uniform(shear[2], shear[3])) + shear_x = np.deg2rad(shear_x) + shear_y = np.deg2rad(shear_y) + shear_matrix = np.array( + [[math.cos(shear_y), math.sin(shear_y), 0], [math.sin(shear_x), math.cos(shear_x), 0], [0, 0, 1]] + ) + transform_matrix = np.dot(transform_matrix, shear_matrix) + + if zoom is not None: + zoom = 1 / float(np.random.uniform(zoom[0], zoom[1])) + zoom_matrix = np.array([[zoom, 0, 0], [0, zoom, 0], [0, 0, 1]]) + + transform_matrix = np.dot(transform_matrix, zoom_matrix) + + transform_matrix = transform_matrix_offset_center(transform_matrix, h, w) + image = np.rollaxis(image, 2, 0) + finale_affine_matrix = transform_matrix[:2, :2] + finale_offset = transform_matrix[:2, 2] + dummy = np.ones((1, h, w), dtype=image.dtype) + image = np.concatenate((image, dummy), axis=0) + + channel_images = [ + ndimage.interpolation.affine_transform( + x_channel, finale_affine_matrix, 
finale_offset, order=3, mode='constant', cval=0 + ) for x_channel in image + ] + + image = np.stack(channel_images, axis=0) + image = np.rollaxis(image, 0, 3) + mask = image[:, :, -1:] + image = image[:, :, :-1] + mask = np.tile(mask, (1, 1, c)) + fill = np.tile(fill, (h, w, 1)) + if interpolation == 'nearest': + mask = mask < 0.5 + image[mask] = fill[mask] + else: + image = image * mask + (1.0 - mask) * fill + image = np.asarray(image, dtype=orig_dtype) + image = ops.convert_to_tensor(image) + return image diff --git a/tensorlayer/vision/transforms.py b/tensorlayer/vision/transforms.py new file mode 100644 index 0000000..89f1ca4 --- /dev/null +++ b/tensorlayer/vision/transforms.py @@ -0,0 +1,1256 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +import tensorlayer as tl +from . import load_vision_backend as F +import numbers +import numpy as np +__all__ = [ + 'Crop', + 'CentralCrop', + 'HsvToRgb', + 'AdjustBrightness', + 'AdjustContrast', + 'AdjustHue', + 'AdjustSaturation', + 'FlipHorizontal', + 'FlipVertical', + 'RgbToGray', + 'PadToBoundingbox', + 'Pad', + 'Normalize', + 'StandardizePerImage', + 'RandomBrightness', + 'RandomContrast', + 'RandomHue', + 'RandomSaturation', + 'RandomCrop', + 'Resize', + 'RgbToHsv', + 'Transpose', + 'RandomRotation', + 'RandomShift', + 'RandomShear', + 'RandomZoom', + 'RandomFlipVertical', + 'RandomFlipHorizontal', + 'HWC2CHW', + 'CHW2HWC', + 'ToTensor', + 'Compose', + 'RandomResizedCrop', + 'RandomAffine', + 'ColorJitter', +] + + +class ToTensor(object): + """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. + + Parameters + ---------- + data_format : str + Data format of output tensor, should be 'HWC' or 'CHW'. Default: 'HWC'. 
class ToTensor(object):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    Parameters
    ----------
    data_format : str
        Data format of output tensor, should be 'HWC' or 'CHW'. Default: 'HWC'.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.ToTensor(data_format='HWC')
    >>> image = transform(image)

    """

    def __init__(self, data_format='HWC'):

        if data_format not in ('CHW', 'HWC'):
            raise ValueError('data_format should be CHW or HWC. Got {}'.format(data_format))

        self.data_format = data_format

    def __call__(self, image):
        # Bug fix: the converted tensor was computed but never returned.
        return F.to_tensor(image, self.data_format)


class CentralCrop(object):
    """Crop the given image at the center.

    If ``size`` is given, the image is cropped to exactly that size; otherwise
    ``central_fraction`` of each spatial dimension is kept, i.e. the output is
    (H * central_fraction, W * central_fraction). ``size`` has higher priority.

    Parameters
    ----------
    size : int or sequence of int
        Output size of the crop. An int gives a square (size, size) crop;
        a 2-sequence is interpreted as (height, width).
    central_fraction : float
        Fraction in (0, 1] of the image to keep.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.CentralCrop(size=(50, 50))
    >>> image = transform(image)   # image shape : (50, 50, 3)

    >>> transform = tl.vision.transforms.CentralCrop(central_fraction=0.5)

    """

    def __init__(self, size=None, central_fraction=None):

        self.central_fraction = central_fraction
        self.size = size

    def __call__(self, image):
        # Bug fix: the cropped image was computed but never returned.
        return F.central_crop(image, self.size, self.central_fraction)


class Compose(object):
    """Compose several transforms together, applied in order.

    Parameters
    ----------
    transforms : list of 'transform' objects
        List of transforms to compose; the output of one feeds the next.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.Compose([tl.vision.transforms.ToTensor(data_format='HWC'), tl.vision.transforms.CentralCrop(size=100)])
    >>> image = transform(image)   # image shape : (100, 100, 3)

    """

    def __init__(self, transforms):

        self.transforms = transforms

    def __call__(self, data):

        for transform in self.transforms:
            data = transform(data)

        return data


class Crop(object):
    """Crop an image to a specified bounding box.

    Parameters
    ----------
    offset_height : int
        Vertical coordinate of the top-left corner of the bounding box.
    offset_width : int
        Horizontal coordinate of the top-left corner of the bounding box.
    target_height : int
        Height of the bounding box.
    target_width : int
        Width of the bounding box.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.Crop(offset_height=10, offset_width=10, target_height=100, target_width=100)
    >>> image = transform(image)   # image shape : (100, 100, 3)

    """

    def __init__(self, offset_height, offset_width, target_height, target_width):

        self.offset_height = offset_height
        self.offset_width = offset_width
        self.target_height = target_height
        self.target_width = target_width

    def __call__(self, image):

        return F.crop(image, self.offset_height, self.offset_width, self.target_height, self.target_width)


class Pad(object):
    """Pad the given image on all sides with the given "pad" value.

    Parameters
    ----------
    padding : int or sequence
        Padding on each border. A single int pads all borders; a 2-sequence
        pads left/right and top/bottom; a 4-sequence pads left, top, right
        and bottom respectively.
    padding_value : number or sequence
        Pixel fill value for constant fill. Default is 0. A 3-sequence fills
        R, G, B channels respectively. Only used when mode is 'constant'.
    mode : str
        Type of padding. Default is 'constant'.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.Pad(padding=10, padding_value=0, mode='constant')
    >>> image = transform(image)   # image shape : (244, 244, 3)

    """

    def __init__(self, padding, padding_value=0, mode='constant'):

        self.padding = padding
        self.padding_value = padding_value
        self.mode = mode

    def __call__(self, image):

        return F.pad(image, self.padding, self.padding_value, self.mode)


class Resize(object):
    """Resize the input image to the given size.

    Parameters
    ----------
    size : int or sequence
        Desired output size. A sequence (h, w) is used directly; an int
        rescales the smaller edge of the image to that value, keeping the
        aspect ratio (if height > width, the output is
        (size * height / width, size)).
    interpolation : str
        Interpolation method. Default: 'bilinear'.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.Resize(size=(100, 100), interpolation='bilinear')
    >>> image = transform(image)   # image shape : (100, 100, 3)

    """

    def __init__(self, size, interpolation='bilinear'):

        self.size = size
        self.interpolation = interpolation

    def __call__(self, image):

        return F.resize(image, self.size, self.interpolation)


class Transpose(object):
    """Transpose an image by permuting its dimensions.

    Parameters
    ----------
    order : sequence of int
        Desired output dimension order, e.g. (2, 0, 1) maps HWC to CHW.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.Transpose(order=(2, 0, 1))
    >>> image = transform(image)   # image shape : (3, 224, 224)

    """

    def __init__(self, order):

        self.order = order

    def __call__(self, image):

        return F.transpose(image, self.order)


class HWC2CHW(object):
    """Transpose an image from shape (H, W, C) to shape (C, H, W).

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.HWC2CHW()
    >>> image = transform(image)   # image shape : (3, 224, 224)

    """

    def __call__(self, image):
        # Bug fix: the transposed image was computed but never returned.
        return F.hwc_to_chw(image)


class CHW2HWC(object):
    """Transpose an image from shape (C, H, W) to shape (H, W, C).

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(3, 224, 224) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.CHW2HWC()
    >>> image = transform(image)   # image shape : (224, 224, 3)

    """

    def __call__(self, image):
        # Bug fix: the transposed image was computed but never returned.
        return F.chw_to_hwc(image)


class RgbToHsv(object):
    """Convert an image from RGB to HSV.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RgbToHsv()
    >>> image = transform(image)   # image shape : (224, 224, 3)

    """

    def __call__(self, image):
        # Bug fix: the converted image was computed but never returned.
        return F.rgb_to_hsv(image)
class HsvToRgb(object):
    """Convert an image from HSV to RGB.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.HsvToRgb()
    >>> image = transform(image)   # image shape : (224, 224, 3)

    """

    def __call__(self, image):
        # Bug fix: the converted image was computed but never returned.
        return F.hsv_to_rgb(image)


class RgbToGray(object):
    """Convert an image from RGB to grayscale.

    Parameters
    ----------
    num_output_channels : int
        (1 or 3) number of channels desired for the output image. Default is 1.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RgbToGray(num_output_channels=1)
    >>> image = transform(image)   # image shape : (224, 224, 1)

    """

    def __init__(self, num_output_channels=1):

        self.num_output_channels = num_output_channels

    def __call__(self, image):
        # Bug fix: the converted image was computed but never returned.
        return F.rgb_to_gray(image, self.num_output_channels)


class AdjustBrightness(object):
    """Adjust brightness of the image.

    Parameters
    ----------
    brightness_factor : float
        How much to adjust the brightness. Can be any non-negative number.
        1 gives the original image. Default is 1.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.AdjustBrightness(brightness_factor=1)
    >>> image = transform(image)

    """

    def __init__(self, brightness_factor=1):

        self.brightness_factor = brightness_factor

    def __call__(self, image):

        return F.adjust_brightness(image, self.brightness_factor)


class AdjustContrast(object):
    """Adjust contrast of the image.

    Parameters
    ----------
    contrast_factor : float
        How much to adjust the contrast. Can be any non-negative number.
        1 gives the original image. Default is 1.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.AdjustContrast(contrast_factor=1)
    >>> image = transform(image)

    """

    def __init__(self, contrast_factor=1):

        self.contrast_factor = contrast_factor

    def __call__(self, image):

        return F.adjust_contrast(image, self.contrast_factor)


class AdjustHue(object):
    """Adjust hue of the image.

    Parameters
    ----------
    hue_factor : float
        How much to shift the hue channel. Should be in [-0.5, 0.5].
        0.5 and -0.5 give complete reversal of the hue channel in HSV space;
        0 means no shift, giving the original image. Default is 0.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.AdjustHue(hue_factor=0)
    >>> image = transform(image)

    """

    def __init__(self, hue_factor=0):

        self.hue_factor = hue_factor

    def __call__(self, image):

        return F.adjust_hue(image, self.hue_factor)


class AdjustSaturation(object):
    """Adjust saturation of the image.

    Parameters
    ----------
    saturation_factor : float
        How much to adjust the saturation. Can be any non-negative number.
        1 gives the original image. Default is 1.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.AdjustSaturation(saturation_factor=1)
    >>> image = transform(image)

    """

    def __init__(self, saturation_factor=1):

        self.saturation_factor = saturation_factor

    def __call__(self, image):

        return F.adjust_saturation(image, self.saturation_factor)


class FlipHorizontal(object):
    """Flip an image horizontally.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.FlipHorizontal()
    >>> image = transform(image)

    """

    def __call__(self, image):

        return F.hflip(image)


class FlipVertical(object):
    """Flip an image vertically.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.FlipVertical()
    >>> image = transform(image)

    """

    def __call__(self, image):

        return F.vflip(image)


class PadToBoundingbox(object):
    """Pad an image with the specified offsets and target size.

    Parameters
    ----------
    offset_height : int
        Number of rows to add on top.
    offset_width : int
        Number of columns to add on the left.
    target_height : int
        Height of the output image.
    target_width : int
        Width of the output image.
    padding_value : int or sequence
        Value to pad with. Default is 0.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.PadToBoundingbox(offset_height=10, offset_width=10, target_height=300, target_width=300, padding_value=0)
    >>> image = transform(image)   # image shape : (300, 300, 3)

    """

    def __init__(self, offset_height, offset_width, target_height, target_width, padding_value=0):

        self.offset_height = offset_height
        self.offset_width = offset_width
        self.target_height = target_height
        self.target_width = target_width
        self.padding_value = padding_value

    def __call__(self, image):

        return F.padtoboundingbox(
            image, self.offset_height, self.offset_width, self.target_height, self.target_width, self.padding_value
        )


class Normalize(object):
    """Normalize a tensor image with mean and standard deviation.

    Parameters
    ----------
    mean : number or sequence
        A number is applied to all channels; a sequence gives per-channel means.
    std : number or sequence
        A number is applied to all channels; a sequence gives per-channel
        standard deviations.
    data_format : str
        Data format of the input image, 'HWC' or 'CHW'. Default: 'HWC'.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.Normalize(mean=(155.0, 155.0, 155.0), std=(75.0, 75.0, 75.0), data_format='HWC')
    >>> image = transform(image)

    """

    def __init__(self, mean, std, data_format='HWC'):

        self.mean = mean
        self.std = std
        self.data_format = data_format

    def __call__(self, image):

        return F.normalize(image, self.mean, self.std, self.data_format)


class StandardizePerImage(object):
    """Standardize each 3-D image to zero mean and (clamped) unit variance.

    For each image x, computes (x - mean) / adjusted_stddev, where mean is the
    average of all values in x and adjusted_stddev = max(stddev, 1.0/sqrt(N))
    (N the number of elements in x), capped away from 0 to protect against
    division by zero on uniform images.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.StandardizePerImage()
    >>> image = transform(image)

    """

    def __call__(self, image):

        return F.standardize(image)


class RandomBrightness(object):
    """Adjust brightness of the image by a random factor.

    Parameters
    ----------
    brightness_factor : float or sequence
        Brightness adjustment factor (default=(1, 1)). A float f samples
        uniformly from [max(0, 1-f), 1+f]; a sequence is taken as [min, max].
        Should be non-negative.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomBrightness(brightness_factor=(0.5, 2))
    >>> image = transform(image)

    """

    def __init__(self, brightness_factor=(1, 1)):
        # Bug fix: the attribute was misspelled 'brighthness_factor'.
        self.brightness_factor = brightness_factor

    def __call__(self, image):

        return F.random_brightness(image, self.brightness_factor)


class RandomContrast(object):
    """Adjust contrast of the image by a random factor.

    Parameters
    ----------
    contrast_factor : float or sequence
        Contrast adjustment factor (default=(1, 1)). A float f samples
        uniformly from [max(0, 1-f), 1+f]; a sequence is taken as [min, max].
        Should be non-negative.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomContrast(contrast_factor=(0.5, 2))
    >>> image = transform(image)

    """

    def __init__(self, contrast_factor=(1, 1)):

        self.contrast_factor = contrast_factor

    def __call__(self, image):

        return F.random_contrast(image, self.contrast_factor)
class RandomSaturation(object):
    """Adjust saturation of the image by a random factor.

    Parameters
    ----------
    saturation_factor : float or sequence
        Saturation adjustment factor (default=(1, 1)). A float f samples
        uniformly from [max(0, 1-f), 1+f]; a sequence is taken as [min, max].
        Should be non-negative.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomSaturation(saturation_factor=(0.5, 2))
    >>> image = transform(image)

    """

    def __init__(self, saturation_factor=(1, 1)):

        self.saturation_factor = saturation_factor

    def __call__(self, image):

        return F.random_saturation(image, self.saturation_factor)


class RandomHue(object):
    """Adjust hue of the image by a random factor.

    Parameters
    ----------
    hue_factor : float or sequence
        Hue adjustment factor (default=(0, 0)). A float f samples uniformly
        from [-f, f]; a sequence is taken as [min, max]. Should satisfy
        0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomHue(hue_factor=(-0.5, 0.5))
    >>> image = transform(image)

    """

    def __init__(self, hue_factor=(0, 0)):

        self.hue_factor = hue_factor

    def __call__(self, image):

        return F.random_hue(image, self.hue_factor)


class RandomCrop(object):
    """Crop the given image at a random location.

    Parameters
    ----------
    size : int or sequence
        Desired output size of the crop. An int gives a square (size, size)
        crop; a 1-sequence is interpreted as (size[0], size[0]).
    padding : int or sequence, optional
        Optional padding on each border of the image before cropping. A single
        int pads all borders; a 2-sequence pads left/right and top/bottom;
        a 4-sequence pads left, top, right, bottom. Default: None.
    pad_if_needed : boolean
        Pad the image if it is smaller than the desired size, to avoid raising
        an exception. Cropping happens after padding.
    fill : number or sequence
        Pixel fill value for constant fill. Default is 0. A 3-sequence fills
        R, G, B channels respectively.
    padding_mode : str
        Type of padding. Default is 'constant'.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomCrop(size=50, padding=10, pad_if_needed=False, fill=0, padding_mode='constant')
    >>> image = transform(image)   # image shape : (50, 50, 3)

    """

    def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'):

        self.size = size
        self.padding = padding
        self.pad_if_needed = pad_if_needed
        self.fill = fill
        self.padding_mode = padding_mode

    def __call__(self, image):

        return F.random_crop(
            image,
            size=self.size,
            padding=self.padding,
            pad_if_needed=self.pad_if_needed,
            fill=self.fill,
            padding_mode=self.padding_mode,
        )


class RandomResizedCrop(object):
    """Crop the given image to a random size and aspect ratio, then resize.

    Parameters
    ----------
    size : int or sequence
        Desired output size. An int gives a square (size, size) output;
        a 1-sequence is interpreted as (size[0], size[0]).
    scale : tuple of float
        Scale range of the crop relative to the original image, before resizing.
    ratio : tuple of float
        Aspect-ratio range of the crop before resizing.
    interpolation : str
        Type of interpolation. Default is 'bilinear'.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomResizedCrop(size=(100, 100), scale=(0.08, 1.0), ratio=(3./4., 4./3.), interpolation='bilinear')
    >>> image = transform(image)   # image shape : (100, 100, 3)

    """

    def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation='bilinear'):

        self.size = size
        self.scale = scale
        self.ratio = ratio
        self.interpolation = interpolation

    def __call__(self, image):

        return F.random_resized_crop(image, self.size, self.scale, self.ratio, self.interpolation)


class RandomFlipVertical(object):
    """Vertically flip the given image randomly with a given probability.

    Parameters
    ----------
    prob : float
        Probability of the image being flipped. Default value is 0.5.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomFlipVertical(prob=0.5)
    >>> image = transform(image)

    """

    def __init__(self, prob=0.5):

        self.prob = prob

    def __call__(self, image):

        return F.random_vflip(image, self.prob)


class RandomFlipHorizontal(object):
    """Horizontally flip the given image randomly with a given probability.

    Parameters
    ----------
    prob : float
        Probability of the image being flipped. Default value is 0.5.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomFlipHorizontal(prob=0.5)
    >>> image = transform(image)

    """

    def __init__(self, prob=0.5):

        self.prob = prob

    def __call__(self, image):

        return F.random_hflip(image, self.prob)


class RandomRotation(object):
    """Rotate the image by a random angle.

    Parameters
    ----------
    degrees : number or sequence
        Range of degrees to select from. A number gives (-degrees, +degrees);
        a sequence gives (degrees[0], degrees[1]).
    interpolation : str
        Interpolation method. Default is 'bilinear'.
    expand : boolean
        If true, expand the output to hold the entire rotated image; otherwise
        keep the input size. Assumes rotation around the center with no
        translation.
    center : sequence or None
        Optional center of rotation, (x, y), origin at the upper-left corner.
        Default is the center of the image.
    fill : number or sequence
        Pixel fill value for the area outside the rotated image. Default is 0.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomRotation(degrees=30, interpolation='bilinear', expand=False, center=None, fill=0)
    >>> image = transform(image)

    """

    def __init__(self, degrees, interpolation='bilinear', expand=False, center=None, fill=0):

        self.degrees = degrees
        self.interpolation = interpolation
        self.expand = expand
        self.center = center
        self.fill = fill

    def __call__(self, image):

        return F.random_rotation(image, self.degrees, self.interpolation, self.expand, self.center, self.fill)


class RandomShear(object):
    """Shear the image by a random angle.

    Parameters
    ----------
    degrees : number or sequence
        Range of degrees to select from. A number applies an x-axis shear in
        (-shear, +shear); a 2-sequence applies an x-axis shear in
        (shear[0], shear[1]); a 4-sequence applies an x-axis shear in
        (shear[0], shear[1]) and a y-axis shear in (shear[2], shear[3]).
    interpolation : str
        Interpolation method. Default is 'bilinear'.
    fill : number or sequence
        Pixel fill value for the area outside the sheared image. Default is 0.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomShear(degrees=30, interpolation='bilinear', fill=0)
    >>> image = transform(image)

    """

    def __init__(self, degrees, interpolation='bilinear', fill=0):

        self.degrees = degrees
        self.interpolation = interpolation
        self.fill = fill

    def __call__(self, image):

        return F.random_shear(image, self.degrees, self.interpolation, self.fill)


class RandomShift(object):
    """Shift the image by random translations.

    Parameters
    ----------
    shift : list or tuple
        Maximum absolute fractions (a, b) for horizontal and vertical
        translations: dx is sampled in (-img_width * a, img_width * a) and
        dy in (-img_height * b, img_height * b).
    interpolation : str
        Interpolation method. Default is 'bilinear'.
    fill : number or sequence
        Pixel fill value for the area outside the shifted image. Default is 0.

    Examples
    ----------
    With TensorLayer

    >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
    >>> transform = tl.vision.transforms.RandomShift(shift=(0.2, 0.2), interpolation='bilinear', fill=0)
    >>> image = transform(image)

    """

    def __init__(self, shift, interpolation='bilinear', fill=0):

        self.shift = shift
        self.interpolation = interpolation
        self.fill = fill

    def __call__(self, image):

        return F.random_shift(image, self.shift, self.interpolation, self.fill)
+ + Parameters + ---------- + zoom: list or tuple + Scaling factor interval, e.g (a, b), then scale is randomly sampled from the range a <= scale <= b. + interpolation: str + Interpolation method. Default is 'bilinear'. + fill: number or sequence + Pixel fill value for the area outside the sheared image. Default is 0. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomZoom(zoom=(0.2, 0.5), interpolation='bilinear', fill=0) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, zoom, interpolation='bilinear', fill=0): + + self.zoom = zoom + self.interpolation = interpolation + self.fill = fill + + def __call__(self, image): + + return F.random_zoom(image, self.zoom, self.interpolation, self.fill) + + +class RandomAffine(object): + """Random affine transformation of the image keeping center invariant. + + Parameters + ---------- + degrees: number or sequnence + Range of degrees to select from. + If degrees is a number, the range of degrees will be (-degrees, +degrees). + If degrees is a sequence, the range of degrees will (degrees[0], degrees[1]). + Set to 0 to deactivate rotations. + shift: sequence or None + Maximum absolute fraction for horizontal and vertical translations. + shift=(a, b), then horizontal shift is randomly sampled in the range -img_width * a < dx < img_width * a. + vertical shift is randomly sampled in the range -img_height * b < dy < img_height * b. + Will not shift by default. + shear: number or sequnence or None + Range of degrees to select from. + If degrees is a number, a shear parallel to the x axis in the range (-shear, +shear) will be applied. + If shear is a sequence of 2 values a shear parallel to the x axis in the range (shear[0], shear[1]) will be applied. + If shear is a sequence of 4 values, a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied. 
+ Will not apply shear by default. + zoom: sequence or None + Scaling factor interval, e.g (a, b), then scale is randomly sampled from the range a <= scale <= b. + Will not zoom by default. + interpolation: str + Interpolation method. Default is 'bilinear'. + fill: number or sequence + Pixel fill value for the area outside the sheared image. Default is 0. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.RandomAffine(degrees=30, shift=(0.2,0.2), zoom=(0.2, 0.5), shear=30, interpolation='bilinear', fill=0) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, degrees, shift=None, zoom=None, shear=None, interpolation='bilinear', fill=0): + + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number, it must be positive.' 'But got {}.'.format(degrees)) + degrees = [-degrees, degrees] + elif not (isinstance(degrees, (list, tuple)) and len(degrees) == 2): + raise TypeError('If degrees is a list or tuple, it should be length of 2.' 'But got {}'.format(degrees)) + + self.degrees = (float(x) for x in degrees) + + if shift is not None: + if not (isinstance(shift, (list, tuple)) and len(shift) == 2): + raise TypeError("shift should be a list or tuple of length 2." "But got {}.".format(shift)) + + for s in shift: + if not (0.0 <= s <= 1.0): + raise ValueError('shift values should be between 0 and 1.' 'But got {}.'.format(shift)) + self.shift = shift + + if zoom is not None: + if not (isinstance(zoom, (list, tuple)) and len(zoom) == 2): + raise TypeError("zoom should be a list or tuple of length 2." 
"But got {}.".format(zoom)) + + if not (0 <= zoom[0] <= zoom[1]): + raise ValueError("zoom values should be positive, and zoom[0] should be less than or equal to zoom[1].") + + self.zoom = zoom + + if shear is not None: + if isinstance(shear, numbers.Number): + if shear < 0: + raise ValueError("If shear is a single number, it must be positive.") + shear = [-shear, shear] + elif not (isinstance(shear, (list, tuple)) and len(shear) in (2, 4)): + raise TypeError('shear should be a list or tuple of length (2, 4).') + + self.shear = (float(x) for x in shear) + + self.interpolation = interpolation + + if fill is None: + fill = 0 + elif not isinstance(fill, (list, tuple, numbers.Number)): + raise TypeError("Fill should be either a sequence or a number.") + + self.fill = fill + + def __call__(self, image): + + return F.random_affine(image, self.degrees, self.shift, self.zoom, self.shear, self.interpolation, self.fill) + + +class ColorJitter(object): + """Randomly change the brightness, contrast, saturation and hue of an image. + + Parameters + ---------- + brightness: float or sequence + Brightness adjustment factor (default=(1, 1)). + If it is a float, the factor is uniformly chosen from the range [max(0, 1-brightness_factor), 1+brightness_factor]. + If it is a sequence, it should be [min, max] for the range. Should be non negative numbers. + contrast: float or sequence + Contrast adjustment factor (default=(1, 1)). + If it is a float, the factor is uniformly chosen from the range [max(0, 1-contrast_factor), 1+contrast_factor]. + If it is a sequence, it should be [min, max] for the range. Should be non negative numbers. + saturation: float or sequence + Saturation adjustment factor (default=(1, 1)). + If it is a float, the factor is uniformly chosen from the range [max(0, 1-saturation_factor), 1+saturation_factor]. + If it is a sequence, it should be [min, max] for the range. Should be non negative numbers. + hue: float or sequence + Hue adjustment factor (default=(0, 0)).
+ If it is a float, the factor is uniformly chosen from the range [-hue_factor, hue_factor]. + If it is a sequence, it should be [min, max] for the range. Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5. + + Examples + ---------- + With TensorLayer + + >>> image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8) + >>> transform = tl.vision.transforms.ColorJitter(brightness=(1,5), contrast=(1,5), saturation=(1,5), hue=(-0.2,0.2)) + >>> image = transform(image) + >>> print(image) + + """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + + self.brightness = self._check_input(brightness, 'brightness') + self.contrast = self._check_input(contrast, 'contrast') + self.saturation = self._check_input(saturation, 'saturation') + self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5), clip_first_on_zero=False) + + def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): + if isinstance(value, numbers.Number): + if value < 0: + raise ValueError("If {} is a single number, it must be non negative.".format(name)) + value = [center - float(value), center + float(value)] + if clip_first_on_zero: + value[0] = max(value[0], 0.0) + elif isinstance(value, (tuple, list)) and len(value) == 2: + if not bound[0] <= value[0] <= value[1] <= bound[1]: + raise ValueError("{} values should be between {}".format(name, bound)) + else: + raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name)) + + if value[0] == value[1] == center: + value = None + return value + + @staticmethod + def get_params(brightness, contrast, saturation, hue): + fn_idx = np.random.permutation(np.arange(4)) + + b = None if brightness is None else float(np.random.uniform(brightness[0], brightness[1])) + c = None if contrast is None else float(np.random.uniform(contrast[0], contrast[1])) + s = None if saturation is None else float(np.random.uniform(saturation[0], saturation[1])) + h = None if hue is None
else float(np.random.uniform(hue[0], hue[1])) + + return fn_idx, b, c, s, h + + def __call__(self, image): + + fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \ + self.get_params(self.brightness, self.contrast, self.saturation, self.hue) + + for fn_id in fn_idx: + if fn_id == 0 and brightness_factor is not None: + image = F.adjust_brightness(image, brightness_factor) + elif fn_id == 1 and contrast_factor is not None: + image = F.adjust_contrast(image, contrast_factor) + elif fn_id == 2 and saturation_factor is not None: + image = F.adjust_saturation(image, saturation_factor) + elif fn_id == 3 and hue_factor is not None: + image = F.adjust_hue(image, hue_factor) + + return image diff --git a/tensorlayer/visualize.py b/tensorlayer/visualize.py index 72c1b18..ad05acf 100644 --- a/tensorlayer/visualize.py +++ b/tensorlayer/visualize.py @@ -5,9 +5,9 @@ import os import imageio import numpy as np - import tensorlayer as tl from tensorlayer.lazy_imports import LazyImport +import colorsys, random cv2 = LazyImport("cv2") @@ -16,18 +16,9 @@ cv2 = LazyImport("cv2") # matplotlib.use('Agg') __all__ = [ - 'read_image', - 'read_images', - 'save_image', - 'save_images', - 'draw_boxes_and_labels_to_image', - 'draw_mpii_people_to_image', - 'frame', - 'CNN2d', - 'images2d', - 'tsne_embedding', - 'draw_weights', - 'W', + 'read_image', 'read_images', 'save_image', 'save_images', 'draw_boxes_and_labels_to_image', + 'draw_mpii_people_to_image', 'frame', 'CNN2d', 'images2d', 'tsne_embedding', 'draw_weights', 'W', + 'draw_boxes_and_labels_to_image_with_json' ] @@ -662,3 +653,66 @@ def draw_weights(W=None, second=10, saveable=True, shape=None, name='mnist', fig W = draw_weights + + +def draw_boxes_and_labels_to_image_with_json(image, json_result, class_list, save_name=None): + """Draw bboxes and class labels on image. Return the image with bboxes. + + Parameters + ----------- + image : numpy.array + The RGB image [height, width, channel]. 
+ json_result : list of dict + The object detection result with json format. + classes_list : list of str + For converting ID to string on image. + save_name : None or str + The name of image file (i.e. image.png), if None, not to save image. + + Returns + ------- + numpy.array + The saved image. + + References + ----------- + - OpenCV rectangle and putText. + - `scikit-image `__. + + """ + image_h, image_w, _ = image.shape + num_classes = len(class_list) + hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)] + colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples)) + colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors)) + random.seed(0) + random.shuffle(colors) + random.seed(None) + bbox_thick = int(0.6 * (image_h + image_w) / 600) + fontScale = 0.5 + + for bbox_info in json_result: + image_name = bbox_info['image'] + category_id = bbox_info['category_id'] + if category_id < 0 or category_id > num_classes: continue + bbox = bbox_info['bbox'] # the order of coordinates is [x1, y2, x2, y2] + score = bbox_info['score'] + + bbox_color = colors[category_id] + c1, c2 = (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])) + cv2.rectangle(image, c1, c2, bbox_color, bbox_thick) + + bbox_mess = '%s: %.2f' % (class_list[category_id], score) + t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0] + c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3) + cv2.rectangle(image, c1, (np.float32(c3[0]), np.float32(c3[1])), bbox_color, -1) + + cv2.putText( + image, bbox_mess, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 0, 0), + bbox_thick // 2, lineType=cv2.LINE_AA + ) + + if save_name is not None: + save_image(image, save_name) + + return image diff --git a/tensorlayer_cn.md b/tensorlayer_cn.md new file mode 100644 index 0000000..ac6844e --- /dev/null +++ b/tensorlayer_cn.md @@ -0,0 +1,346 @@ +TensorLayer3.0是一款兼容多种深度学习框架后端的深度学习库,支持TensorFlow, MindSpore, 
PaddlePaddle为后端计算引擎。TensorLayer3.0使用方式简单，并且在选定运算后端后能和该后端的算子混合使用。TensorLayer3.0提供了数据处理、模型构建、模型训练等深度学习全流程API，同一套代码可以通过一行代码切换后端，减少框架之间算法迁移需要重构代码的繁琐工作。 + +## 一、TensorLayer安装 + +TensorLayer安装前置条件包括TensorFlow, numpy, matplotlib等，如果你需要使用GPU加速还需要安装CUDA和cuDNN。 + +### 1.1 安装后端 + +TensorLayer支持多种后端，默认为TensorFlow，也支持MindSpore和PaddlePaddle，PaddlePaddle目前只支持少量Layer，后续新版本中会持续更新。 + +安装TensorFlow + +```bash +pip3 install tensorflow-gpu # GPU version +pip3 install tensorflow # CPU version +``` +如果你想使用MindSpore后端还需要安装MindSpore1.2.0，下面给出了MindSpore1.2.1GPU版本的安装，如果需要安装CPU或者Ascend可以参考MindSpore官网。 +```bash +pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.2.1/MindSpore/gpu/ubuntu_x86/cuda-10.1/mindspore_gpu-1.2.1-cp37-cp37m-linux_x86_64.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple +``` +如果想要使用PaddlePaddle后端，还需要安装PaddlePaddle2.0，下面给出了PaddlePaddle2.0GPU版本的安装，其他平台请参考PaddlePaddle官网。 +```bash +python -m pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` +### 1.2 安装TensorLayer + +通过PIP安装稳定版本 + +```bash +pip install tensorlayer3 +pip install tensorlayer3 -i https://pypi.tuna.tsinghua.edu.cn/simple (faster in China) +``` +如果要获得最新开发版本可以通过下载源码安装 +```bash +pip3 install git+https://git.openi.org.cn/TensorLayer/tensorlayer3.0.git +``` +## 二、TensorLayer3.0特性 + +TensorLayer3.0版本主要设计目标如下，我们将会支持TensorFlow, PyTorch, MindSpore, PaddlePaddle作为计算引擎。在API层提供深度学习模型构建组件(Layers)，数据处理(DataFlow), 激活函数(Activations)，参数初始化函数，代价函数，模型优化函数以及一些常用操作函数。在最上层我们利用TensorLayer开发了一些例子和预训练模型。 + 
+![图片](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABrcAAAN9CAYAAAA5QCKTAAAgAElEQVR4AezBDXiUhYHv7d8zhDB8ZAgGzAQQnVTF0BoJb9I1obqHyVo1FF2SytaErmtg363AVrdEPF4M7duSnK5KVukS7PYQ3F1JtAihaA24XEyuY0uG08w6NlpjrGYoSpgAI8MEzBACz1vUaGaAkPAd+N+3Yf4ZIiIiIiIiIiIiIiIiIgOABREREREREREREREREZEBwoKIiIiIiIiIiIiIiIjIAGFBREREREREREREREREZICwICIiIiIiIiIiIiIiIjJAWBAREREREREREREREREZICyIiIiIiIiIiIiIiIiIDBAWRERERERERERERERERAYICyIiIiIiIiIiIiIiIiIDhAURERERERERERERERGRAcKCiIiIiIiIiIiIiIiIyABhQURERERERERERERERGSAsCAiIiIiIiIiIiIiIiIyQMQhInJemEQOtHD40G66Docwj0YQkUuDJW4ocUNGET98LFbbdcil41hXB5HwDjo/aeNo5wHMY0cQkUuBhUGDhxFnTWLIiPHED09Bzp8jn+zh8MEPOdKxj6Ndh8A8hohcfIYljkHxNuKHJjPEdi2DBo9ARK5sRz7Zw+GDH3KkYx9Huw6BeQwRuTIZljgGxduIH5rMENu1DBo8gvMtDhGRc+TY0QgHdv2Wg3saOBR8G/NYFyJyabMMsjJ89M0kJGcxcuztYBjIhXXkkz0c2L2Ng3v+m47QHxGRS1/ckFGMGHMLCfZbGTEmAzl7h4Jv0b57Owf3vcmRjn2IyKXPOjKVEWOmMDIlh/gR4xCRK8Oh4Fu0797OwX1vcqRjHyIiJ2MdmcqIMVMYmZJD/IhxnA+G+WeIiJyFo51hgi0v8/GfXsM81omIDEyDBo/gquvuJin1XgzLYOT8ioR38LH/FQ60/hYRGbiGjBjPVdflkXhNLtJ/B3b9ho93vEok7EdEBq4E+1+Q5PgWQxNvREQuTwd2/YaPd7xKJOxHRKQ/Eux/QZLjWwxNvJFzyTD/DBGRM7T/T6+x570XONbVQU+GYWGobTxDho9m8BAblkHxGBiIyMVlYnKs6zBHDoeJHNpDR3gXseKsV3H1jfczctztyPmx5901BP2vEGtQnJWhtnEMGZpE3JARWCyDEZGLzzSPcbSrg85IiMjBNg4f2kusoYk3cPXEIoZdlYacXuRAC3uaqzgUfJtYQ4YlYR1hJ96ayKDBwzAMCyJy8R071kVX50E6O4J0hHfTdeQQsUZdeyfJN30XwzIYEbk8RA60sKe5ikPBt4k1ZFgS1hF24q2JDBo8DMOwICJXpmPHuujqPEhnR5CO8G66jhwi1qhr7yT5pu9iWAZzLhjmnyEi0k/m0U5a33qW8O56ehpx1VcYefXXSEi6HsMYhIhc2o4d7aQ9+B4H9vyBQ6E/0VPiNX9Fytf+Hjl3DrfvpPWtnxM58AE9JSanYxuTxvDEaxGRS9+Rw2Ha9zUTanuLw5/so6erJxaRlHoPcmr7d/4XgT9U0lO8NZFEezoJoycSbx2FiFz6Pgl/RHhvE/sDb4Jp0m3IiHGk3Pw9hibeiIgMbPt3/heBP1TSU7w1kUR7OgmjJxJvHYWIyMl8Ev6I8N4m9gfeBNOk25AR40i5+XsMTbyRs2WYf4aISD8ciQT56I1yIgc+oNvwUQ7GXJPDUNs4RGRgOrTfz94P6+kI76Lb8NHpjJ9SgmXQEOTsHNrXyEe+pznW9QndEu3pjB6fzWDrSERkYAq1vcXeP/2Wrs52uo269i7skx5ETrTnvRcIfvArug2KszLm2m8wKmUKIjIwdR05RPDD7Xzc+t98wTAYn/EDEpK/jogMTHvee4HgB7+i26A4K2Ou/QajUqYgItJXXUcOEfxwOx+3/jdfMAzGZ/yAhOSvczYM888QEemjrs4D7PxdKYfbd9ItOdXJVWMzEZHLw96
d29i3cxvdhl31Va79ugsMC3JmPgn+gZ0NZZjmUY6LGzwc+/XfJCHpBkRk4Dt2tJPAB1s4sOcPdBs14ZvYvzoH+dKe5mqCLRvplpB0I/av3EFc/HBEZOA7FNpB4P3/ojMSotv4KY+SkJyJiAwse5qrCbZspFtC0o3Yv3IHcfHDERE5E4dCOwi8/190RkJ0Gz/lURKSMzlTg/6/P0NEpI8++u+n6Aj9keMslsGMn1TAyKu/iohcPoaPnMCQoVfRHnyP44507KXzUCs2+61I/3V+0saH3p9y7GgHx1lHJHPN1+5jmG0cInJ5MCyDSEi6EQP45MCHHBc58AGGMYhhV6Uh8PGfNrH3vV/S7apxWYy94W4sg+IRkctDvDUR25g0Igd3c+RwmOPa2xoYcXUGcUNGISIDw8d/2sTe935Jt6vGZTH2hruxDIpHRORMxVsTsY1JI3JwN0cOhzmuva2BEVdnEDdkFGfCgohIH7W9+zyHgm/zKcPgmq8WMGKUAxG5/NjGpDH+pr+mW3i3h2DLRqT/Am//b7o6D3DckOFjuOar3ybemoiIXH5GT5jKmGu/Qbe9f/wlB/e+yZWuI/Qebe/8O92Sxn2dZMc0ROTyEzd4GNd89T6G2cZznHnsCLvf/gUiMjB0hN6j7Z1/p1vSuK+T7JiGiMi5EDd4GNd89T6G2cZznHnsCLvf/gVnyoKISB8cCr7Nx/5f023cjd9i2MgJiMjlK2H0jdhT/4pue5qrOdy+E+m7oP8VDgXf4jiLZTDjJs4gbvBwROTyNfqaHBLtt9Btz7vPc6Vra3qebrbRN3G1438gIpcviyWOsRNnEBc/guMiB1rY+96LiMilr63pebrZRt/E1Y7/gYjIuWSxxDF24gzi4kdwXORAC3vfe5EzYUFEpA/2vb+ObleN/X+wjUlDRC5/o8ZOwTYmjW773l+P9M3RI+3s++NLdEv+yl8xZNhoROTyl/KVbzJkWBLHHT74EcGWX3Gl2r9zCx2h9zguLn4E9uu/iYhc/gYPScD+lb+i274PNtD5SQARuXTt37mFjtB7HBcXPwL79d9EROR8GDwkAftX/opu+z7YQOcnAfrLgojIabTv8fLJx00cFxc/nDHX/SUicuW4+rrb6RYObKcj9D5yeh/vqOXY0cMcN2JUKonJNyMiVwjDYMy1t9Et6H8VzGNciT7e8Srdxlx7G4PirIjIlSEh6UZGjplEt4931CIil66Pd7xKtzHX3sagOCsiIudLQtKNjBwziW4f76ilvyyIiJxG6EM33ZLGfR2LJQ4RuXIMHjKSpHFfp1voIzdyeqEP3XRLGv8XiMiVJSHpRoaNnMBxRzvDhD6q40rTHvi/dB7azXHW4VeTmHwzInJlSRr/F3QLfbiVY0cPIyKXnvbA/6Xz0G6Osw6/msTkmxEROd+Sxv8F3UIfbuXY0cP0hwURkV50HT7AwT3/zXGGMYhE+2RE5MqTmHIL3cK765Hete/x0nU4xHHDbOMZNvIaROTKM8p+C93CAQ9XmnDAQ7dE+y2IyJVnyPAxjLjqeo4zj3XRHvAgIpeecMBDt0T7LYiIXAhDho9hxFXXc5x5rIv2gIf+sCAi0otD+35Pt4TRE7EMGoyIXHniraMYNnICxx3r6uDQvkbk1A7t/T3dEkZPRESuTLYxN2EZNJjjDu17i2NdHVxJDu710c02+iZE5MpkGz2Rbgf3vomIXHoO7vXRzTb6JkRELhTb6Il0O7j3TfrDgohILz7Z30y3EYnXISJXrhGjrqPbJ/ubkVP7ZP+7dBsxyoGIXKkMhideR7dP9jdzpeg48D7HuiIcN8w2nkGDhyIiV6YRoxx0+2R/MyJyaek48D7HuiIcN8w2nkGDhyIicqGMGOWg2yf7m+kPCyIivTjc/ie6WRNSEJErl3VECt0Ot+9ETs40j3K4fSfHDRo8lPihVyEiVy7rCDvdDrfv5EpxuP1DulkTUhCRK9egwcOIH3oVx3VFPqar8wAicuk43P4
h3awJKYiIXEiDBg8jfuhVHNcV+ZiuzgP0lQURkV4c6dhLtyFDr0JErlzxQ6+i25GOvcjJHenYR7d46yhE5MoWP/Qquh3p2MuV4kjHXrrFDx2FiFzZ4oeOotuRjr2IyKXjSMdeusUPHYWIyIUWP3QU3Y507KWvLIiI9OJoZzvHDRo8FAwDEblyxcUPo9vRI+3IyR09cpBugwYPQ0SubHGDh9Ht6JGDXCmOdh6kW9zgYYjIlW3Q4GF0O9p5EBG5dBztPEi3uMHDEBG50AYNHka3o50H6SsLIiK9MM2jHGcYgxCRK5thDKKbeewocgrHjtLNsAxCRK5shmUQ3UzzKFcK0+yim2EMQkSubIZh4QvmUUTk0mGaXXQzjEGIiFxohmHhC+ZR+sqCiIiIiIiIiIiIiIiIyABhQURERERERERERERERGSAsCAiIiIiIiIiIiIiIiIyQFgQERERERERERERERERGSAsiIiIiIiIiIiIiIiIiAwQFkREREREREREREREREQGCAsiIiIiIiIiIiIiIiIiA4QFERERERERERERERERkQHCgoiIiIiIiIiIiIiIiMgAYUFERERERERERERERERkgLAgIiIiIiIiIiIiIiIiMkBYEBERERERERERERERERkgLIiIiIiIiIiIiIiIiIgMEBZEREREREREREREREREBggLIiIiIiIiIiIiIiIiIgOEBREREREREREREREREZEBwoKIiIiIiIiIiIiIiIjIAGFBREREREREREREREREZICwICIiIiIiIiIiIiIiIjJAWBAREREREREREREREREZICyIiIiIiIiIiIiIiIiIDBAWRERERERERERERERERAYICyIiIiIiIiIiIiIiIiIDhAURERERERERERERERGRAcKCiIiIiIiIiIiIiIiIyABhQURERERERERERERERGSAsCAiIiIiIiIiIiIiIiIyQFgQERERERERERERERERGSDiOGM+yjOmUPImJ5HHqj++ypzrucx4KDNycNHDfVXsXluInR4+qqbgmiJq6OEn9ZhLshmoQm9XU/qbNJY9lMEJPqqm4JoiaujhJ/WYS7IRkXOoq52m+m24tzRQ0+jHX9eIn25JpN2eStqNN5M3LRencyKO4ZyFZsq/UURJIycxlVVvLmdOKn3TsBojdyVRZpay+z/uws5pNKzGyF1JT6VbvSzOQkSkd7s2U5DmooY+SnbgnDQaR/o0nPfkkp+VhJXT2LWZgjQXNfSPIysLx3gH2TnpTP+Wk+xx8ZxakOoH7qRoAz3Moz5cTDYicllr2cj0yUupJVrG4tXUP5aOlT5o2cj0yUuppaeJLH7tOUqz4+lNZFcztXVbcW95i6bmFtzvBOnmyMoi45abyZuWi9M5Ecdw+iSwbhEpxW56Kt3qZXEW517Qj7uugVp3Hb7mXbgbWulmn5TFpCkTyXdOxTntFtKS4hERkctVI2W2Ylz0NI/6cDHZnE6Q6gfupGgDPTipanqSwnFIPwXWLSKl2E2fpabjvHYIjvRpOO/JJT8rCSsiX7JwprbXsuxNTqGWmt/4kctAqInqH+SSdnMR5fsiiMjF0E7TumfITZvGpG+5WLB8I+66Rvz0FKTp9QZqVq1mblERqSn3MH3JZpoOcGYatrGskVPYRk19K2dlw9OUbmpHROSS0ebHXddA5fInKcq9k6GTF1Huaed88Dc04N6wlrJHXeSkzSD3cTf+LkREoqXey4qfTSWWr6ySqhb6oJ3aFSupJZr94UdwZcdzSm0NlD9wD0PTiiiYt5qKDQ243wnSk7+hgZpVq5lbVERqyj1MX7KZpgNcGg74qX78IVIc95Fb/CTlaxpwN7TSU+CdBtxr1rCg+CEmOWYwqXgN7jZERETkUtLSiLuugcrlT1KUeyeOb63EHUTkCxbOSAT3yysIcGq1P6vBhwxcIZpeKCE3bRJFT7sJICIXxc5tlHzr20wqXoO7jX5opXa5i0l/uZTK9zrpn07ctb8
kwKnV/tyNj7MRpOL7lbgPISJyaWpxU3Lnt5le0UyE8ymIu2IROfM240dEJJrj7+axLIsY25j7480E6F2krpI5q4JESZ5N1f/MwsrJ+dctJfWGhyjZ0ErftVK73MWkzIdw1bVzMYU8q8nNvI+iigYC9FWQpnXPkHvDPcxd4yeCiIiIXIoCr68md9aT1O5C5FMWzkSkntqfBujVm1W430QGqu0VTCosxx1ARC6WnZspyn2Y8teDnLGWjcyd/hOqd9J3kd9TWx6kV42bcDdydtrWULKikQgiIpeqILWPf58561o53wIvuihZF0REJNpE5i+bRzYxNjzNsrpOTinSTHnpGgL0lMT8n83BOZyT8q9zkVO8ET9nqK2BsnsfZsGmVi6GSMNq8u5cibuNM9RK5bzvUVDRTAQRERG5JDWsZc6P3QQQAQtnILSpmnJ6clL6k/lE81G1xccVaXwh600T0zQxTRPTNDGXZHPZGl/IetPENE1M08Q0Tcwl2YjIWTjUSNmDLqrbOIFj5jyqfv0Su3d7McNezLAXc08dLVtLKZ05lhO0baZoyWYC9E1oy2bK6SmL0sWziNZMVV0zZ8tX9gwVjYiIXBgzS9kd9mKGvZhhL2bYixn2Yu6uY//bVax/6l6cycQIUl38BJUt9M3MUnaHvZhhL2bYixn2Yoa9mGEvZtiL+XE9Lb9+hPxUTlDzzCZ8iIhEs2bMpvyxiUQLUr5kDZ4IJ+V/cSWuBqLYv/MYrrsTOJlIw2qKijcTINZY8heXUv92HR1hL2bYixmuZ//bq6la7MRBrEYq/sZFma+TC2rnZuYUrsRDrCScf7eIV72v0fGxFzPsxQx76fjjS7z61L04k4kRpPbx7zNnQxARERE5//JXv4YZ9mKGvZhhL2bYixn20vHha7yz/hHyUzlB4MVKqhoRwUK/hXC/UkmUOwop/Mc85hPN9y+1eBARkf7pxP3Pj+JqIEY6i9e/Rst/FFN4uwP7cL5kTcCRdReL/2Md7/zsLuzE2PA8VY30QTvuTRuJMu0uCr83lflE863Yhoez1UjJjzfiR0TkIhqeQOKEieT/wxK2bn+WxVnE2IZrxTZCnANx8Thun836/3yEbGI0buONFkREYsST/cgjLEwmWuNKSl9s5QS73LjKthHNSbnLiZ2TONRI6T+txEOMrFmsf/tl1j92F9kTErDSLZ7ECekUPvYkLW+XMieVGI24Stbi40JppXqJi+o2oiVPZdn/WcfWn80i78YkrHF8wZrsIO8flrDVu5rS25OIFqR6USW1QUREROQisY5MIu2O2ax/uZTCZGI0U/WbZkQs9Nf766l8jih59ztxJOaQ9xDRAit4tS7C6XiWGhiGgWEYGIaBMauaAH/WFaJpUwULZuUyKcXAMAyMG3LILS6jenuAXm0vwzAMDMPAMAwMo4Dqj/hU6N1aKuYVkHtzCoZhYBip5HxzLmUveAh0cfY+qqbAMDAMA8MwMAwDY6mH0wlsr6Z8XgG52akYhoFhGBg35JA7awHlL3gIROiTyEceqp9aQME3c5mUYmAYBoZhYBip5Hwzl7mLKqjeHuBkPEsNDMPAyHZxgh/mYBgGhmFgGAVUf8RnPqqmwDAwDAPDMDAMA2Oph9OKBPC8UM6CWbnk3GBgGAaGYZCancvcRZXUvhvitD6qpsAwMAwDwzAwDIOy7Xwm5Kf2uRLmOieRYhgYhkHKzbnMXVSJ+6MIpxfCX1dN+bwCcrNTMQwDwzAwDAMjZRK53yxgwQ8rqX03hMg51bgW1/Ig0Say+LWfU3pHEr2LJ+3vfkjVw0lEa2bZpkZOq8VN5Rqi5N2XhWNkOnlzidb2S159vZOztmUlrg1BREQuCUlZlD79CNlEC6xay/oWzp10J3OmEaOBQBARkRMNz8L1s1nYiVZbtpKaXfTQifvnT1DdRpS8nz1C4QROyr+hkrJGoiXfRdVzi8ifQO8m3MWql0vJJ0bDM1S80s6FEHn9VyzcQIx0SqufYmF
GAr0amc7iXz7F4nSita3FtaYZERGR3kReX0mKLRPDlolhy8SwZWIsaSBCb9qpmZeJYcvEsGVi2DIxbM/gjvAFzxOZGLZMDFsmhi0T44HNBPizrnaatqxlwQMPMemGTAxbJsbkYnLnraa6IUh/BBo2U/6DReRMzsSwZWLYMknNfYi5SzZS29JOr3ZtpsCWiWHLxLBlYtgyKWsADjRT+YNF5EzOxLBlkpq7iAX/1kCgizM34S5KF2cRy/f7XQToTSeBhs2U/2ARubn3YNgyMWyZGJOLyZ33DJVb/IS66KN2/K9vpvwHi8jNvQfDlolhy8SwZWLccB+59y5iQdlGat9rp8+6gnjWrWHBA8Wk2jIxbJkYtnvIuXcpJWu24T+A9IGFfvL/poZaesoj/zYHkEjO3fOJFqBsUz0R+q9jRy0LbhvFpLwFVLzkpinAZ9734H7ORVF2Cjnfr8HfRd91+an9fg6j0qaz4Nka3G8H+Iwfz5ZKXIU5pNy2gJodXFj73LicKaRkF1HybA3u7X6+8L4H90sVlBTmkOLIxbUlxKmFcP8wF8c1ORQtqqBmi5umAD348WxxU/nUAoqyU0idVYHvIBdBCN+/FjHJkUJOYQkVL7nxvM8X/NvdVD41l+lpo0hxllCzg37zbyghNy2V6cXlVNY1EeAzgbfdVD41l9xrHEx/2keEU9hRQ4kzjVRnESXP1uDe7idKoAn3lhoqls5letooUmdV4DuIyDnh2fQ8HqLZH34EV3Y8fROP88F55E3KYs7Dj7Cq6lneeLsO/8J0Tsdf76aWnqaSnzMWSCDnjllEC1K25fdEOFtBqhdVUhtEROTSkH4vJbOJsY3aN4KcO0MYmoiISJ8l3v0QlXOTiNK2mfk/byDCZyK+NbiWB4mS9QilfzeWk2um5ufbiJW3eB6FE+ibCXfhWjqRWJXrthHgfGvH/avVBIhmf/ghFmbF0yfD0yl57F5i+VZsw4PI5cmz1MAwDAzDwDAMDMPAMAyMpR5OxbPUwDAMDMPAMAwMw8AwDIylHk4uQPUsA8MwMAwDwzAwDAPDMCh4IYDI5cB6+zdYkEy05duoj3BqBxpxryGKfeFUcqz0qmPnNhbcOY1JBU9SsaGBpjY+09KIe81KinLvJOdRN/4uehdspPyBe0jJdVGyyo2nhS/4GxqoXL6U6ZO/Te7jm2k6RN+FGinLL2LuKjeeFj7lb3BT8ehDLPtNJ2fD8dUsTnC4k1MJ+dZSdOsMUnJdlKxy425o5QstjbjXrGFuwX2MSnuIklda6dVONyXf+jap33JRssqNu6GVKG1+3HVuKp5YyvTMaaQ+sBbfIXoV8qyhIPNOcoqfoWJDI366teKp20j5vIdJzXyIknV+IkhvLPSLH/cLtUS5Ox/n9Xwq8bY85hPjqRrcIfon9Cqu+6dTsZ1eef61gJziavz0RYhXf1jE9H/10KvtFRRkF1G9gwtjRzVFN+dSVhfgtAJuyr6ZR9n2CCeK4FmaR+5SNwH6xv/SAvLm1RDgQvJTUzyFKd+vpinAaQXqyinIzqVse4S+emNlETn55bgD9CJA7Q/ycG2JcIKPaijKLqC8LkBf+V9aQN68avyInK1G3KuCRJtIyX1ZWOmH1Ht5dfuzrFo6mzkzssiYkIA1jtNoxf3SNqLc4cSZyqcSc6YynxjL63Af4Oy1rWXOMw1EEBG5FCSQ43QSq6b+XUKcK4fpCBEjCeIQETmFBPIWPkZhMlECy5+l3NcJtFJVuhIPPaVTumwWGZxCy7vUNhLjXubMGEt/ZMyYRR4xNjTyxgHOr8i7uFcRYyIl92Vhpe8S77iLxclEa9tKfSMiIiK9SCdvwUSiraHW08mphOq3UUFPSSzIy8JKL0K/xfXgw1Q00CvPvy0iZ95m/JzCTjdzc4sp2dBK74K4K1xMumc1nkP0yfofP4GrgZOYhXNKPBeKf8NSpvzlk1S/E+S02hooL3qQ3PJGIpzELjdFuYsofz1IX/k3PEneP23
Gz8n5Nyxlyp3PUNNC79oaKC++D+cTjUSQU7HQH2/WULGFKPnfnY6DzyXmkPcQMSqoqQvRL1uqqd5OnwSeL6LoaR+n56b6eQ99Eqim6P5yfJxnER9l9xdRHaAfPLhmllC7jyiR7eXM/6GH/go8X0rVm1wgETxLiyh4zk+/BNy4Zs6hegd9UvN8NQH6IkD501X46SmCe+V8qgN86db5rPK00HHExDRNTLOD/f6tlM6w01Pg+SLKNoQQOSstftxtxJhKTjrnX6Obijqi5N//DRx8bmQ6eXOJsZaa19vpr8U/fYRsogWWP0u5rxMRkUuB/Ss3YSfG7/z4OTcinl9RUUeMXDJSERE5tXFOSpc6idaIq3QT7nUrcW0hSvaPHmNhRjynEmpuxk2MGVlkJNE/qTfhTCfGWnzvcX6914ybWFPJSad/rDeRM4MYzdS/F0RERKQ3GdPuJoNoVa//nggn0079lrVESf8ueVn0rm4z1Q30SeBFF0UVzZyomfIHF1HZQt81rCR/yTZCnJ6vsZmTmjuVnJGcFf8fGjhBUgJWokUaVlP0wEb89EcQ948fZc66VqJ14l71BNVtfClrFqu2vkzHx17MsBczXM/+t5+l9O4kegq86KLslXZO0LiGogc24qfvPGWPUrKpHTk5C/3g21KFj57yKbjNzpcScc5cSKzKV9yEOAP2PErdLXQcMTFNk44P61nxoINYnh9UULOPPrKT97+20tJuYpomZsdu6n82BwcxtpdQsSHE+eR/wYVrO9FunU+Vbz8dpolpmuz3VTH/VqIFKih93seXQrj/04WPnhzMWf0G+ztMTNPENDvY37SexbcSw0d9U4Bu2UtMTNPE9JRygp/UY5ompmlimuspHE+/RLaXM/+HHqLZcS5Zzzt7OzBNE9PsYLdnFQun2YkSqKZoXiV++spO3v/aSku7iWmamHvfoOofsznBJje+AD004XkuQE/58xYy51YH1jg+ZyXxOieLq6tYfKuT/IcWs6pmKwSlSlwAACAASURBVG/497NiZiIiZ6VtF25izEzFwfnnq9uEj56cFGQn8aUEnDNmE6tyUwMh+mfo12dR8aN0ojXiKlmLDxGRS8DVY8khRmOYCGejk9CBIE1bVlPwt6vxEc2+cBrOkYiI9MrxnUdYdQfRtiwlt3gzAXpIL6b8exOxcmr7D+zjBJPG4qC/RuOYxAne+FOQ8+pQGB8xZqRip78SSBmXRKyaD3YhIiKXo5Xk2DIxbJkYtkwMWyaGLRPDlolhy8SwZWLYMjFsd1K0gd6lO5l/B1ECaxqoj3CiA43UriJKxn1ZZNBHyVMp/fXLdHzsxQx76WhazYrZY4nleXwtNUGiBNZVUtJAtKxZVP22jo6wFzNcz/7fljI/iyiBVSupbOSMzb8jnUTOws7NuMoaiOVMd5BID5FGyv9pJR6i2W8vZr23jo6wFzPspaNpNavmZ2GnpyDVxU9Q2UIPfjxrgvSU//ezmZM1Fmscn4sncUIWi1eXsjgri/y5xayqepY33q5jxYwEogWpLn8GD9Gy/6GUN/z1mGEv5p463lg9i2x6ClJRthEfcjIW+iriZv2/+IjyYBF544livS2PhcR4roraj+infKo8r7J4mgNrHJ+yjs9m/uqtVH3XTrRKKl/x0xf5/1nPq487cYzgM1Y72f+4iq3VhdiJVvm/1+PnfPFR87NaotgLqXphBYWTE7HymcTJhax4fhV5RPP8pxsf3RLJ+5f9tPi2sn71Yubfl03ao6tY8WAGiVY+ZyXxpnxcP1lILH9gN+dfiNpfuPDRk53C6nq2/iSftNFWPmPFfusclr1cQ+mtRNvkorIuQl/k/aKeVx934hjBZ0ZnUPizCpZNJkYT/gA9RCBAFP8f/YQ4iRFOSj1bWb+ylDkznWRcl4gVkbMT2LWDiyLSwPoVzUSZfRd544hizZ7KQmKs2UztLvopnozvPcTCZKI1PIPr31sREbk07cC/i95tcJFiy8SwZWLYMjF
smRi2TAxbJoYth1HX3MmkgpXUthEt+S4q5mZhRUTkdMYy50ePkE1vkli4dC7Zw+lV4E9uThDHGUgi5StccIFdOzhBHAyl/1KuvQUREZH+G4tz5lSitG3E/btOYoXqt1FBT1nMnzGRvnFStXU5i28fizWOT1nHpTN/5c+p+k4S0TZSuamVL7Xy6vNuoiTP4tW1iyhMT8DKcfEkpt/FiudKyaenZpZtaCBCHyRPpfTXr9ER9mKGvZhhLyvuTqD/OgkdaMWzbiXTc11UtxFjKoW3j6Wn0JaNuBqJYv9OKfW/nkf+jQlY+Yx1XDpzfrqcmsXpRNuG6/kGInQ7DG1E8X+wixAnMTyL0q3Psv5f5jFnRhYZExKwEqPlt1RuIIp97nJqn7qLjKR4PmVNIOPbi6j6hZMojc+z/vVO5EQW+ijicVMZIMqcGU4SiWHNIe9RYtRQtcVPf2Q86aLwOk7CQeEPSsggWm1DEyFOY/IyXN91cDKO+0somUy0TT6aQpwfb7qpepMo9nkLKLyOE13vpOi72TgfXMiy1evZ6mth/28WkkEP1kQck53kP1jKirX1vPOkEysnsiaMIpbvYITz7qNaqp4j2t2llN7v4KRGZLP42WVk0FOAsk31RDidhSz8roMTZZAzgxg+IhF6sGKdTBTf0lzSbi6i5LkafDtCRBC5lDRSZsvEsGVi2DIxbJkYtkwMWyaGLRPDlolhy8SwZWLYVuPh5CK/a6CyjShz7s4ikRjWW8h7mBhuqupa6bfhWbh+Ngs70WrLVlKzCxGRK0fqvax69Yfkj0NEpG/SZ1Hxo3ROxT73h7imxSMiIiLnn+Pue5lDT0Eq635PhJ7aqd+ylih33IUzlT7JWDqHwgmcxFgKF3yXDKLVvuEnxOd2NlJbR5SMBfeSl8SJJjiZ/zBRAmveoonTy//pD1l8exJW+q6m+E4MWyaGLRPDlolhy8Sw5TDqmnvIKV5NbRsnyP7RPIpS6SFI7bqNRJtK6f+8CwcnE0/2Y4+xLJ0ogfJt1Ef43BCs6UTxPfEQabe6KFnjxreznQh95//dNtz0NJGSv5tKIidy/PV9LKSnIJUNfuREFvokhPulMgL0NIe82xI5kZWcOxYSq/YFN376Li87g1OanEMBMTb48HMaM3LI4FQyyMknxnp873NehD704yNaweQ0Ts5B4X/Ws3X1MhY+mI9zsoPEEfTdwRD+N2upfmoB0/9+BRdDpOkNaojmnOnEQS8mOymwE+15D02cxswpOKyclHWond5lkP/9PGIF3q6mvLiAKY5RDDVSyZm1ANdzNfh2RBAZ+Npx/2o1AXq6l7ycBE4UT45zNrFqX2rAT/8l3j2Hiu8kEaVtM/PLtxFCROQyl+ygcPGTvPN/ljDnxnhERPounozvPUJpFidKvouKhVNJRETk5LKXmJimiWmamKaJaZqYpom5JJtTyV5iYpompmlimiamaWKaJuaSbE7OTuFaE9M0MU0T0zQxTRPTNFl/vx2Ry0pSFnmziRJY00B9hC9F3sW9iij5938DB32T9/WJnFJ6OgXEeKUZP5+JfPAuNURzjBvNycUz6RYnUdrepWkXpzGLOXckcb7Zv1NK1cKJWOkh0sIbG4g2zYkzlV5MxDkziWib8DTxuYnkf28qsQLvbKZ83iKmfG0aQ233kPPAk7jWuPHt7OTUOvH/wU20caQkcXLWVKbMJEqg0U8AiWWhL/a5qXmWaA/m4RzNSVlvy2MhMbZUUPMmfZTPpOvohQPHfUQLQITe5ac56I3j+nyiBaCL8yJycDexUsYkci5EPvJR+0IFJcW55NxgYCSMIjVjOkWLKqh9O8DFENrnJ5bzZge9S8FxG9ECu9kdoncjrAzl5FKuy+F0HA+uYP2DDk7Nj+elCsqKC5jiGErKzUWUbWgihMjZs197EycIHaaD8yjYQM0qos2eijOJk7JmT2UhMerWUtPIGUgi3/VP5BMtsGolyzydiIhcWsZiT+Ls3XgXq35bR8c
fX6LqMSdpIxER6b/h6Sz8n7OI5XxsDvnj6BN7chYn6OIMBNn9ARdcYtJYTtAFHfTf7j/9HhERuVLMoz7sxQx7McNezLAXM+zFDHsxw17MsBcz7MUMv0bVTPogAeeMWURp24jnLb4Q8WyjnJ7upWhaEn3jZNIEejEOx0yitUGEz4SCrcSqKb4Tw5aJYcvEsGVi2DIxbJkYtkxSit1Ec+NvpXfTJpI2kvNoLPlLn8W38i4cxAgG8RMjx4GD3qVcewvRguwOtNPNMfsx1s8ey6m14tmwlrJ5i5jytRxSbnVR9oqfELHaCewkhpuitEwMWyaGLRPDlolhy8SwZWLY7qRoA9E2tOJHYlnog8CWKiqJ8VwBowwDwzAwDAPDMDAMA8MwMIbmUk4sH1VbfMj5E3q7mpK8VIZeM4XphQsof86N530uM7sJHaR3N6Rg52w4yF/9Di21yyj8mp3TCbxdjSt/Emn3lOM7iMjZSR5LPjHqmmk6wHkTqNtMJTHWLGKULRPDlolhy8SwZWLYMjFsmRhXP0Q5sZqpqmvmjEy4C9dP04nWTJlrDb6DiIhcHHtaqSdGso1RVno3s5TdYS9m2Iv5cT27vctZNnMsUd7bzNyCRbg2tSIicjasiaOJlZhoo69GJTs4we/8+OmvfTTVcYIp1yZxPlmTRpNBjFfexR+hn4L4m4PEyv/KOERERPoicdo0FifTQ5AV7kY+00m9ew1RZk/FmcTlI3EIQzmXxpI9LQvnzFksXrmcd/wvs/7hLOxxnFe7D3XypbHkr1xHy/pHKJyUxOkE3tmMq+g+0v5mDb5DyAVg4bT8vPp8DeeCr9qNj77ws3sf55w/sJvLVWR7GXk3F1G+yU+U67PJf2gxK6pfpb5pNx3uUgY2B/bRXABWHHcvpOqt3XR8+AbrVy9m/ow07Jxa4JUS8n7iJoLIWZjgICedGGtxv9HJyaWzOOzFDHsxw17MsBcz7MVsKiWfvmjl1RfcnAu+lxrwcWYy5jxGaRbRGlYyv/S3iIhcDKHAPgLEmHYdKfRDXDz2G6ey8D+q2LownShtDZT/zYMUrWtFRORiSZw4EScx6ty4W+ifxgbWtxHjLtImcH7dOBEnsTbh/l0n/XLgXdwvEiOJKV9JQkREpE+st+CcnURPgVVePPxZ5PfULifKwvu+QSJ9tYvdQS5tE5JIpP/yV7+GGfZihr2YYS9m2IsZ9mKGX6Z+47Ns/Y9FlM6eSloSF4QjKYFo8TjumE3V9tfoaKpi/cpi5t/twM6pBTY9Q94/NxBBzjcLp/O+m+pNnBtvLmN9XYTT8+H/MMSp7cb/G6JNtmKld74/7ibEqe3eUU+0DKxWzgvriBRiveEPcCqBl+YyyVnAgh9WUlPnwx+K8IWIm9KZLjz0cOtiXv2wA/OP9axfWcr8+/PIvsmOdSgXReJoB7Hcb/npnR//b4g2OYVRVi4o6/gM8h8sZcXL77D7SAe7m+pZv3ox8+92ECvwVCW1AUTOwkSc900kVvm/uwlwHrQ0UL2Fc6Pxeda/3skZsU5kYWkxGUTzNDQiInLhdfJG/VpiOXNuws6ZSMD5o1KqvpNEtCDVxS7KGjoREbkoUm8iL50Y26h4pZm+68SzaRM+Ysz8BjnJnF/Wm3DOJUaQshd/S4i+C2zZSAUxkv8GZwYiIiJ9FI9z5nfJoIe2X+JugIhnG+X0kFxMXnY8fdeMf1c7p7YPfz3R0uOx8hnr8NHEWvyaFzPsxQx7McNezLAXM+zFDHsxw17MsBcz7MUMezHDXhZn0bsRQ7BykSQl4SBGvR8/vfN/8HuiTSQlKZ5TsY6bSP7seaz45Uvs/rie3d7VrF9ZzPw7xhIrsHwjtW18Lp7EJGIUUx/2Yoa9mGEvZtiLGfZihr2YYS9m2IsZ9mKGvZhhL2a4mGwkloXT8G2owM25EqBySz0RTq9iSz0RTuH9N3AHiJadhoPTeLaW+oOcgp836gJEyyHtOs6LxLQMnESr8b1DhJOJ8E5
DJU11NVQsnUuBcwqpo4ZSsiXCcZHf1FIWIMrCn7jIG28lVqR9PxeDNW0K+URzb3Dj59Qi292sDxDFfncGaVxEcVbsN2WT/2ApK2rfYeujdqJV07QDkbOSMWMWecTY8DQL17VyrvleWYubcyVIZd3viXBmrNlzWfZwEiIiF13wt1QvJ8ZUCm8fy5kbS+GPHqMwmRiNuAp/Qs0uREQugonkf28qsXxLnqCsoZO+iDSsYWFZM7HmfHsqds63BJx/XYydGGueYP66Vvpk52YWPu4mVsaCqWQjIiLSD+lZFKXTQ5AV7gbc7jX0ZJ+dRY6VfqlwNxLhFFrexd1GtK87cPCZxIkTcRLN09zKZcOaypSZRKtz427h1CKNuF8JEiV5Khk30jdx8dhvTCd/9jxWrF/H1oeTiLaZpp18LoG09CyivcU7LchZstArH+5qH9EWsrXDxDRNTNPENE1M08Q0TUzTxDRNTNPENE1M3zIyiBb4aS31EU7vX0sp3x7hRAFq/sWFm2j5t00hkdOpoPRpDxFOFNhQjmsL0e7LYUoi58f1U8ibTLSnyql6nxN9VEPFU8SYjzPLynGhfX5ihQ5GOJGfmueruCjGZ5N3N9E2uXC94OekDnoo/ycXPnqyM+eOHKycTxFCgSY8GyopXzSX3OxJlGyJcHJWRo1JIZod4hA5O6l341o8kWhBqotdLHilldNqa6TymbXUcDrNuF9qJtpstu7xYoa9mGEvZtiLGfZihr2YYS9m2IsZ9mKGvZi/fYQMogXKt1Ef4QzF4/zeYxQmIyJy8XS1Uv34E1QSzT53FgWpnJ1xTiqr55FNjLbNzP/xZvyIiFx4jpmzWZhMjEZchT+h8r1OehPyraGgcCUeYmQ9wvwZCVwI1tv/mtI7iBGkutiFq66dXu10s+BBF9VtREueRensiYiIiPTPRPLnZtFTYNVSFrxIDxMpmZmFlX76t0rKGzo5UZCaFStxEy0/5yYS+VzqTeSlE8VdtpbaA1wmksi+YyrRtuH65834OZlOPMufwdVIFPvsLHKsfK6TUJsfzysbKV+ylNzc+yip6+Tk4hk1ejTRkiCOLzimTCWDnhpwPbeNEHI2LPRmey3L3iSK/fE8cqz0zWQnRZOJUU7NlhCn58E1czolL/gIRfhUZJ+P6u/nU/BsgGhzKLrDTl94fpjP9B9U49sX4VOREL4XFpCfX0GAaHPuz8PO+ZJB/vfziFbL3NumU1bnJ9LFpyI7anH97UJqiGZ/PB9nIqdUOa+IsroAEY6LEHq3lvJZuRQ9H6BP7A7yifHWO/gj/FmE0A4/gQj94KDonxZip6cA1YU55P6gGt++CJ+JENheSck9+bi2E+2+chZMs3I++Z8rYFTKJHLy51LyVCXu7U2U/+10Sl7wETjIlw768Ty3gPmLfEQrIvtriJyleLIXPEZpFjEaqSi6h9QHVlPd0EroEF86FKTp9c2Uf/8hUm4oZu6/NXJaDdtY1kgU+8Kp5Fjpm/QsitKJsYaaunbO2DgnpUudiIhcaJEDrfheWcvcv36QoheDRJtK6YKpJHL2rFmzKV+cTqzAi0/jWteKiMgFNzyL0l/Mxk6Mts3Mzfw205dsxLOznQif6+ok8F4DlUseZspfPkNtGzHSKf3nWWRwoYxlzlNLyCNWI2X3fptJxWuofS9IpIvPdRLa2UzNcheTchdR0UCMJAp/Opu8JERERPrNcftd5NFDWyv+Nr6UfjfOdM5AI67ChylZ10wowqciwWaqH32UglVBot1L0bQkvjSR/LlTidK2hun5T1LZECTC5yLtNL2ykpzJxeTOe4byNW487wUJRbjkOWbOZmEyUQIvusj51jNUN7YT4TORXY1UPv4w+WWNRHNSPjcLK5/xr3mUUTfcR07RUkqWb8Td4Kf8/32YknXNBA7xpUOteNY8yfwlzUS7m+w0vpTuZP4dRAksf5i8Rzfi2dVJt0jQT82Pi0nNfYi5S9ZQ+UojTW3tRJCTieOUIrh
fXkGAnuzMuSMHK32VgbMwA9700VPFBjelM/JJ5DQCbsoLp1BO7/J+sZj80fRRAPfTRUx5mt7dvYrFMxM5nxz3l1L6i1pc2/lSoBaXMxUXvbAXUjHPiZXP2NNyyKAGHz0EanE5U3BxhqxWrMR4aS6pL83lM/lUfbiewvH0mfWOEiq+W0XB8wG+FMD9dNH/zx78wGVBGIj//zyPCKhgqKiPpissV6gzsXOKLVtPui2s6ydmt7DdVbi1pHXddPX9/bTud4W31zXtNn9at/umdr8Nu5ul35tBayZddYF+19SWSduauKnxpKgkKiAo36iRgIpg/nvg834z+p9pXSiLvMezCHF2pUyby5yFBczbxDGRQhZkjWYBp5b+j9MZH4/02fUYyZxlj1P6lw+yZCvNlK56kumrnqS9Uu64lBQaHaaw4D+I0FQfsq+/inja6grC066A3/yWphav/hW5N4ZJ4vSkfP0Bnl5RyIw1SNKZtWouA1bNpX36kLX0IbKHcIbEkn7fA+T+8m7m/oom9rD8/36Sqem5ZF7MKTzJ+J5P0i5Tcin7t68RQpKOF3/9TAoXlhO+/xdEaOp9Cn70GAU/eoy26UPW0lzmjImlrebe8BfMpT3C5JU8TtbFHDPkFvL+cycZtyylmKb2UPLcD5n83A9pq/Q5P2DJrQORJOm0DPkS06dAwSpOKO32MaRxmj74FQvuns4CWpexMJvMPjSTMi2bOU+/wbzfcMyvfsaMG37GDE5g628o/CkfC834ESVPXEMSF7AeY5j9+NfI+5tfEOGYyGs/ZfqXfkrr+pC19AGyLuZTKVOymfMvbzDvNxzzwa9YcPd0FnBq6X9/I+PjaWIg02ffzeI1S9nIMcU/fozxP36ME1nyq1+xhAZ9yPmP51h0YyJqLsjJVBdR8P0IzYSyCafH0x5pk6aTRgvL8ijYQavSrw8T4tRC38hj0TdTaJNxYcIhTi2URd6T2aRwlsWnMefZPLJCtEMK2QtzyRzEMaOyyb03RJtcnk32N0I0s7GUUloIpRG+kVaspHQH7RQi88lCnv5GiHYJZTD/xSVkXcrZl5DO3GXzyQjRbqGb57P4O2nEI50hnwvz9Nql5N7Yh8+k/xhmLV3BhifDhPiz6rcoWLCHZvrfQviLsbRH2vU3kkYLP/0FBTv5DAaS/fcPkI4knW99yPj+QpbcOpAzqsdIZs2fSTotfPALcv6hkAiSdK7FknpnLkV5t5HOaeo/hjn/+Rx5tw7kfEi6fiYFrz5ARn9O00Cyn1xB4UMjiUeSpNPVh4xbb+HEriHnxis4HekTxhDi1EJfz2XRnQM5To+RzP3nmYT70z5jZrLysWtI4sIXmvIIhQu/Roj26EPG9xey5NaBNNNjJHOffICM/rRb6MYHWPztK4inufj0GSz++zGEaJ/0OT9g/o2J6HhBTqLixeUsoLnQzMmE42mfUWGmj6KFleStKaU1k/8xn8Kl2aRwcunfeZ6ipVmk0EYZueS/8jTZl3Ny43J4vjiPrEs5Ny7NIu/ttcy5PsQphTLIfX0DT09LobkkMh4v5Om7UmhN6OZcioqfZv60qTSzqpCN5bSQwvRHcknn5KrqaL+EVLL//xI2LMwiNcQppUybT9Hb+cwaFc+5Ej9qFvnFzzPr+hBtEyL8d3kULp9FWgLSmdVnJHP+4zm25M0kcwjtkjLmFnKXLqWs5Cnm35pCEsdUrPkFC2guNONLhONpn5FjmD6SFgrJe+V9PpORt7HgoSuQpPMlNOEWFr30HPk5VxDPmRefdgeL/34kLUX+/Z+YtWoPknQ+pNz8IEW/f4r5t6YQoq36EM55mA3rniL3+kTOp6S0O8h/cwV5f3sNKbRdypQHyN/8HE/fkUI8kiR9NkkTwuRwApPChIdwWib//Y8ofPIWUji59Hsep+jJr5HCicWPuZv8/FxmTehDW6RMeZANK+8mvQdRIpbUO3MpefVBsob14ZSGhJn/0nPk51xBPMeLH3kH+WsfZ9a
EPrRNH8I5uRQuvYO0HpxALOmzfkTh0jsI96cNBpL5gzwKHhpJPDqRICdUQeHqJTQX4r5J6bRfGuG/TqOlghWFlNKaeFLvepqtZWtZdG8m6ZfzicvTybx3Ps9v3EfRwkxSYmiX+CuzebqkjLULc8gcl8InUkiflsP8lRvY9/oiMi/l3EoOk1tYRllxHvPvzSQ8LoVPXZ5OeFoO85cXUbY9nzlfSuKEElLJXrqFssKnmXVXmNQQnwilEr4rl7ziMkp/Pof0ZEhKG08mTS1hyapSWoofN4eisrUsujeT9Mv5sxCp12eS83geU6/kNCWR9p08tpSWsWHl08y6K0z65XwqZVyY7O89TX7JPrb+bBbpyZx7l2Yyv7CMfSX5PP1wDpmTwqSGOCaUSnhSNrMW5lG0vZS1T2SRmoB0liSSevPdPL+piH1vPkXeD+4g+/oxpA+hiT6kThhD5oy7WbT0KbaUFrF17cPMuXUkoRhaqKTwxf+kuT7cFx5J+11B+PYraKlg1a8o5bOIJf2BB5jVH0k6N/qnEL4+TM5DD5P/5iuUvfAwOemJnD2xpOU8RO4YWtjD8gf/mZU7kaTzo/8YZi1dQVnpCtYufZBZd4whPGYgTaWMGUP4jjtYtPQptpS+xNrv30JaHy4MF6WQ9diP2LrrJTbkPcycGWHCE1IIcUxo2BjCU25jzpOPs6GkiK3/dgcZn4tFkqQz4qIxZM7qQ0uZt3+JFE5XLKl3PMzW3z/Fohlh0ofwiSEjyZzxAM//9ysU/SBMSgytiv/815j/wmrK1uYyf0aY8JiBHDOQ9OvD5DyWS1FJEVv/7TbSLiLqJKXdRt661ZT99+M8/be3EB4zkE8NGUn4jjt4+vkV7HvzcWalJ9Kqz4WZ/8JL7HvzRzz90G1kXj+G1P4c0z+F8PW3MOsHuRSVrGbt979Gag9aEUvqrQ+w9u2XKFr6ADlTxpA+hGOGjCQ85TbmL11K2a6f8/w9V5CETiZQ/xHOs+LHAox/hGZyi+uZM47Tt24egfS5NPNoEfUPpyOp7Upe/CsaxMQmMPSLM5HUuZX89+M0iInrxdDwv6DjVe37HdvWPUyDxOQrGHTlLUjqvKoq32fbWz+lQWJoLIPSvktnULb5X6nYvpYGg4dNJaH3ZUjqvMree4mKyFs0GHz1gyT0uxpJF4ayzf9Kxfa1NBg8bCoJvS9Dn9VhCv/hZm5YsIdjbuH50ofJ7MMpFf/TXzB+Hs3krn2TOWOQOqSy916iIvIWDQZf/SAJ/a6mLYJIkiRJkiRJkqTPbuuLLFiwh2buuIZwHySdQUEkSZIkSZIkSVK7VVfzZ4ep+M0vuO+bj1FAc9k3jiEJSWdSDJIkSZIkSZIkqZ1+w4J+dzOXVoycSfakRCSdWUEkSZIkSZIkSVI7JRO6nlb0IWfONNLjkXSGBZEkSZIkSZIkSe0UR7ckTqIPGd9fyPwbE5F05sUgSZIkSZIkSZLaqQ+p48eQ/tavKN7Kx0LDxjB50tfIvCtMxpBEJJ0dMVwA0h+up/5hzqxxc6ivn4MkSZIkSZIkSWdD2j1PUXQPZ0z6Q29S/xCSTiGIJEmSJEmSJEmSFCWCSJIkSZIkSZIkSVEiiCRJkiRJkiRJkhQlgkiSJEmSJEmSJElRIogkSZIkSZIkSZIUJYJIkiRJkiRJkiRJUSKIJEmSJEmSJEmSFCWCSJIkSZIkSZIkSVEiiCRJkiRJkiRJkhQlgkiSJEmSJEmSJElRIogkSZIkSZIkSZIUJYJIkiRJkiRJkiRJUSKIJEmSJEmSJEmSFCWCSJIkSZIkSZIkSVEiiCRJkiRJkiRJkhQlgkiSJEmSJEmSJElRIogkSZIkSZIkSZIUJYJIkiRJkiRJkiRJUSKIJEmSJEmSJEmSFCWCSJIkSZIkSZIkSVEiiCS1IhCMoUH90SNI6tzqj9bRKBCMQScWCMbQrxyCkgAAIABJREFUqP5oHZI6t/qjdTQKBGLoLALBGBrVH61DUudWf/Q
IjQLBGCRdOALBGBrVH61Dks61+qNHaBQIxtBWQSSpFV1iE2lwpK6K+vojSOq86moP0qhLbE90Yl1iE2h0pPYQkjq3usMHadQlNpHOokvXRBrV1R5CUud2pPYgjbp0TUTShaNL10Qa1dUeQpLOtSO1B2nUpWsibRVEkloR260/jQ4f2oukzuvwob00iu3eD51Y1279IBCkQU3VXiR1boer9tIotns/OovY7v1odLhqD5I6t5qqfTTq2r0fki4csd370ehw1R4k6VyrqdpHo67d+9FWQSSpFXE9L6FRVeX7SOq8qirfp1Fc4iXo5OJ7XkqDo3U11BzcjaTOq6qyjEZxiZfQWcQlXkKjqsoyJHVedTWV1FZX0KBrt7506ZqApAtHXOIlNKqqLEOSzqW6mkpqqyto0LVbX7p0TaCtgkhSK7r3upJGB/aVIqnzOlCxjUbde12BTq57rytpdGDfViR1TkeP1nGwopRPBOje6wo6i/iel9IlNpEGVZVl1B2uRFLndGBfKY26905F0oUlvueldIlNpEFVZRl1hyuRpHPlwL5SGnXvnUp7BJGkViT0HUWjyj2/40jtISR1PjUHd1O1fycNYmIvonvvYejkEvqm0Wh/+btI6pz27y6hvr6eBgn9RhMIdqUzSeibRqMPd5cgqXPaX/4ujRL6piHpwpPQN41GH+4uQZLOlf3l79IooW8a7RFEkloRjOlOzwHpNNpXtglJnc++yCYa9Rw4HrWuR/JIunbvR4PqAx9wYN9WJHU+FZFNNOo5IJ3OpueA8TSqiLyFpM6nqnInByu20aBLTHd6htKRdOHpOWA8jSoibyFJ50JV5U4OVmyjQZeY7vQMpdMeQSTpFJIG30CjPTvXU1d7CEmdR82h3ewr20ijpEE3oFPrNfgGGu3ZsR5JncuHu96hqrKMBl279eWigdfS2ST0TSO+56U0OFy1j73v/xpJncue7etplPS5iRAIIOnCk9A3jfiel9LgcNU+9r7/ayTpbNuzfT2Nkj43EQIB2iOIJJ1Cjz5fIKFvGg2OHqll17ZXkdR57Nr2Go2SBn2ZuMTB6NR6XzqZmLiLaHDow+3sff/XSOocjtRVs2vbazTqk3ITnVXvlJtotPuPr1Fb/SGSOocPd22mcu97NAgEY+h96WQkXbh6p9xEo91/fI3a6g+RpLPlw12bqdz7Hg0CwRh6XzqZ9goiSW2QPPRWGn34wdvsK9uEpI6vfHsRB/b+gQaBYFeSL78VtU0g2JXkobfR6IOtazm0fweSOr7Ie7+k7nAlDbolDaXXJV+js7po4LX0SL6KBkeP1FL2h18iqeOrObibyHtraNR36G3ExCUh6cJ10cBr6ZF8FQ2OHqml7A+/RJLOhpqDu4m8t4ZGfYfeRkxcEu0VRJLaoNtFl9N36DQaRf7wSyr3/A5JHVdF5Dfs/uN/06h/6l/TtVtf1Ha9Bk8kMfRFGr3/29XUHNqDpI7rg9JC9pe/S6P+V36Dzq5/6l8TCHShwcF9pZT9/hdI6rjqDley87erOXq0lgY9+oygz5BbkHTh65/61wQCXWhwcF8pZb//BZJ0JtUdrmTnb1dz9GgtDXr0GUGfIbdwOoJIUhslX34riaGxNNpR8r/4cNcWJHU8+97fQNl7v6BR0uCJ9PrcV1D7DRjxLWJ7DKRBbU0l2995jqrKMiR1PJGtL7N355s0Cg27i269rqCzi0sYxIAvfJtGFR/8hvd/X4Ckjqfm0B7+tPk5ag6V0yAmLokBI+5BUnSISxjEgC98m0YVH/yG939fgCSdCTWH9vCnzc9Rc6icBjFxSQwYcQ+nq8v/+xEkqY0S+3+RQ/tKqK3aTYPKPb+D+iP0SLoESdGvvv4oH2x9mfLtRTRKDH2Ri6+6D52eYJc4uve6kgMf/IqjR2o4eqSGil1v0zU2kfiE/kiKfoerK3j/ty+wf/cWGiVflkmfy/4v9In4npcQ7BLHwT1v06Dm4C4OVfyRbgkhYmJ7ICn67d9
dwo6SVdQd3k+DQJdYBo/+HvE9L0VS9IjveQnBLnEc3PM2DWoO7uJQxR/plhAiJrYHknQ69u8uYUfJKuoO76dBoEssg0d/j/iel3K6AvUfQZLa4eiRGnZufIIDuzfRKD6hP8mD00ns83kkRacPd71D+Z/e4HB1BY16DriGi0fdjz676v2l7Nj4BLWHdtEosffl9BmcTrfEAUiKPvX1R9m783+z+49vUF9/hEbJl99K36HT0PH2lr7AB+/+hKaSB4+nz6AvEuwSi6ToU3NwN+U7itm/+10adYntyaC0v6N772FIik57S1/gg3d/QlPJg8fTZ9AXCXaJRZLaoubgbsp3FLN/97s06hLbk0Fpf0f33sP4LAL1H0GSTkNky1L2/fElmopPCHFRv+Ek9hlK17ieSLqwHa7aS+We3/PhrneoOVROU8mXTaHv57+Ozpy6mn2Uvf0vHNi9iaYSel9Gz76pJPa6jGBMHJIubFWVZVSW/5aKXW9zpLaKRoFgDKHh3yRp0JfRye2PFBPZ/D85UnuQRsEuXUnqP5LE5Cvo3nMQki5sR4/WcmDvVvbvLqFyz+9oqnvvVAaMuIfYHgOQFN32R4qJbP6fHKk9SKNgl64k9R9JYvIVdO85CElq6ejRWg7s3cr+3SVU7vkdTXXvncqAEfcQ22MAn1Wg/iNI0mnaX1bM7t89y+FDH9BSbLfexPXoS9e4RIJdYoEAks63eo7W1VBbs5/qg7uorf6QluJ7Xkq/K7LokXwVOjv2lK5m9+/+nfqjdbQUn9CfuG59iIlLIBDsiqQLQP1RjtRVc7i6guoDH3Ck9hAtJfb7C/pdOZ3YHgPRqdVW72HXuz9lf1kRLXWJiSc+oT+x8b3o0rUbBIJIOv/qj9ZRd/gAh6v2UlVZxon0HTqN5MtvRVLHUVu9h13v/pT9ZUW01CUmnviE/sTG96JL124QCCKpc6o/Wkfd4QMcrtpLVWUZJ9J36DSSL7+VMyVQ/xEk6TPaU7qafX98idqq3UiKTnEJg+h1ydfo9blJ6Oyrq9nHntIXqPjTLzl65DCSolOPPiPodemNJPb7C9R+B8t/w95tBRzYvRFJUSoQpNfgifROuYnY7v2R1DEdLP8Ne7cVcGD3RiSpzQJBeg2eSO+Um4jt3p8zKVD/ESTpDKn84E0O7PoVB/dspraqHEkXttgeIXr0+QKJ/b9Ij+SR6Nw7euQw+8ve4MCuDRzau4UjtQeQdGGLv2gICclXkRgaS3zPFPTZ1VRuZ39kHQfL36Kq4vdIurAFY7rRvXcqCX1H03PAeLp07YGkzqGmcjv7I+s4WP4WVRW/R5JaCsZ0o3vvVBL6jqbngPF06dqDsyFQ/xEk6Syoq97L4YNl1NVUcPRINZIuBAGCMfHExPUiNmEgMbEXoQvL4UMRag99QF3Nh9QfrUXSBSAQpEtMd2K69SEuYRDBLvHo7Kk/WktN5XZqq8s5UnsQ6o8i6fwLBLvSJTaR2O79ie0xEEmqP1pLTeV2aqvLOVJ7EOqPIqlzCgS70iU2kdju/YntMZBzIVD/ESRJkiRJkiRJkqQoEESSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBFEkiRJkiRJkiRJihJBJEmSJEmSJEmSpCgRRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEUSSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBFEkiRJkiRJkiRJihJBJEmSJEmSJEmSpCgRRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEUSSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBFEkiRJkiRJkiRJihJBJEmSJEmSJEmSpCgRRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEUSSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBFEkiRJkiRJkiRJihJBJEmSJEmSJEmSpCgRRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEUSSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBFEkiR
JkiRJkiRJihJBJEmSJEmSJEmSpCgRRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEYMknQVlH/6Jsg//yJ4DH3CgpoLDdYeBeiRJkiRJkiRJHUeXYFd6xCbQq0df+vcczOd6X06XYAxnUwySdIZs3/sH3t65jt9GNnGgZj+SJEmSJEmSpM4lEAjw+X4jGX7xGIYPHMPZEKj/CJL0GWzb81uK3vsFf9i9BUmSJEmSJEmSGvTu0Y/0y77C6M9dy5kUqP8IknQajtYf4aXN/8Gbf3yVlnr16E9y4kAu6pZM99hEYrrEIkmSJEmSJEnqWI7U11F9+CCV1fvYe6CMyId/Aupp6pI+n+crw24jdNFgzoRA/UeQpHaK7N/Ozzc9wwf7d9Coa5c4Lus/kkv6pJIQn4QkSZIkSZIkqXOpO1rL9j2/Zeuut6k4tJumbr7qbxg1eDyfVaD+I0hSO2wr/y0/e/MpauqqaDQ0lMawgWOJ6RKLJEmSJEmSJEmluzezeUcRh+uqaXRDaibjL/sqn0Wg/iNIUhvt2LeVn677Z2qPHKZBt9gErr70BvpfdAmSJEmSJEmSJDVVVXuQDdvWEqnYRqNJw6YxbshETlcQSWqjQ4cP8L82LqH2yGEaXNQ9meuunEr/iy5BkiRJkiRJkqSWunXtwTVD/5KUvsNptGbLCt6NbOR0BZGkNnrx7eXsO1ROg4T4JK4Z+pf0iLsISZIkSZIkSZJaM/rSG7gkOZVGBW/ncbBmP6cjiCS1wW92rGNL2a9pNCblK3SLTUCSJEmSJEmSpLb4i5RJ9E4I0eBgTSVr313F6QgiSW3w+u/zafSFwV+id0IISZIkSZIkSZLaY9TnrqPRW9uL2L73PdoriCSdwsY/vcHeg7tokNS9L58PjUaSJEmSJEmSpPbq1aM/nw+NptH/3vYK7RVEkk7hrR1v0GhoKA1JkiRJkiRJkk7X0NBoGm15/032V+2lPYJIUiv2HPyA7Xv/QIP4rt35XJ8rkSRJkiRJkiTpdMV37c4lyak0KolspD2CSFIrtu4uodHAXpchSZIkSZIkSdJndXGvy2m0dfcW2iOIJLVi576tNOrXczCSJEmSJEmSJH1W/XoOptGOfVtpjyCS1Irdle/TqFf3fkiSJEmSJEmS9Fl1CcZwUfdkGlTXHuLDqr20VRBJasWH1XtpEAgE6R7XE0mSJEmSJEmSzoQecRfRaH/1PtoqiCS1orq2igaxMfFIkiRJkiRJknSmxMbE06imtoq2CiJJraivP0qDAAEkSZIkSZIkSTpTAgRodLT+KG0VRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEUSSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBFEkiRJkiRJkiRJihJBJEmSJEmSJEmSpCgRRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEUSSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBFEkiRJkiRJkiRJihJBJEmSJEmSJEmSpCgRRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEUSSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBFEkiRJkiRJkiRJihJBJEmSJEmSJEmSpCgRRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEUSSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBFEkiRJkiRJkiRJihJBJEmSJEmSJEmSpCgRRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEUSSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBHDeVPMvMB45tLEtDzKfpZFCEmSJElSR7PpiRFMf5w2S75yLJf1T2TI1ROYkDmJCZcncl7UlbP+X1dSOeVbTBzAccpXPcB1975MU/fnb+aeq5EkSZJ0FgSRJEmSJOkCVP7ueta/+jLPPvEI934pna8+sJKtBzmnyt94hgeu/TJ3P7qFGiRJkiRdCIJIkiRJkhQFdvz7I9x177OU1HD27VrPM9/8KtdNnc+aUiRJkiRdQIJ
IkiRJkhQlyn85j9wfl1DD2VRO/pxsfrB6J5IkSZIuPEEkSZIkSTpP7s/fzDuRzbwT2cw7kc28E9nMO5HNbPjtS+T98E7G9uM4m/7xWV7fiyRJkqROKogkSZIkSReYuIsuZtTXZ/PUsvsZRUsrWfdmJZIkSZI6pyAdVXkJBctmM+MrNzB+aIBAIEAgECAQGMCw8A1MnTmXJS+WUsHJFT8WIBAIEAgECAQCBG5bToRPRNYtZ8HMqYwfGiAQCBAIDGH8bfexYNVGKupom+oIhctmMyM8jAGBAIHAAIaFZzDv2WIidXxi3TwCgQCBQIBAIEAgMJXlO2hux3KmBgIEAgECgQCBQIDAY8WcTPFjAQKBAIFAgEAgQCAQYN46Tq68hIJls5nxlRsYPzRAIBAgEAgQCAxgWPgGps6cy5IXS6mgjaojFC6bzYzwMAYEAgQCAxgWnsG8Z4uJ1PGJdfMIBAIEAgECgQCBwFSW76BVkXXLWTBzKuOHBggEAgQCAYak38CMB5dQ8F4FkiRJkqJP3NV/w/0zOc6ad3cAO3k+awTDQyMYHhrB8NAIhoeyeb6U1m1+hltDIxgeGsHw0AiGh0Zw9093Qlk+D4RGMDz0ZR5cTQsv82DaCIaHRjA8NILhT7xFm9SUs37JPB6Y/FWGh0YwPPRlbp76CD9e9RbldbRDDeW/zueZ//EAd0/+KsNDIxgeGsHw9Onc/cB8ni/cSmUdp/AWPw6NYHhoBMNDIxgeGsEDq8r5WF05m1Y9Q+43p/PV0AiGh0YwPH06D/yPZ3h5cyWSJEnShSRIh1NNybIZDOk7jMl3L2DJmkKK36OJCCWvFLLyqXnMyBhCang2Bdtos6qKjSy+bQgD0qcz+6mVFL/Hn5VSvGIxszNH0+va+yjYRqsq3lzA5JQB3HD3Apa8UkKEBhFKXlnC3KzxDLj2Pgq2cZ5VU7JsBkP6DmPy3QtYsqaQ4vdoIkLJK4WsfGoeMzKGkBqeTcE2WlXx5gImpwzghrsXsOSVEiI0iFDyyhLmZo1nwLX3UbCN9ikvZsFtQxiQPp3ZT62k+D0+VbqukCU/mMHkoanc8N3llBxAkiRJUlSJ47IvTKSl8rrDwMVcN20iza0n/42dtKbkjXxKaGoCk6+5mDOtZvOzPPDlL3P3nGdZ8+udfKKcrW+sZOG907nu2kd4eTunVPnWszz45a9y3eSH+MEzL7P+1zv5VOlbrP/3Z3gk6y8ZNzqbHxTspH1qqHzrWR649stMv3c+z65+ix38WelbrHlmPn87MZ3pc15jB5IkSdKFIUgHU/psNuG7l1BK20ReWcDk2+exsZpTq8hn7o2juW9FKa1at5jJM5dQyolVr5tHxpjZFEQ4uXWLmXz7bArLOG9Kn80mfPcSSmmbyCsLmHz7PDZWc0LV6+aRMWY2BRFObt1iJt8+m8Iy2mbbSmakj2f2ilJaF6Hwn6czbNI8ig8gSZIkqYNIvu4mptLc+tXr2cHJlLB+RQnN3DyZ61I4o3a/+q/cPXEea0o5udKV/O23/5WSGk5qx+pHuPWr88h/t5xT2rWeZ+6ezt0L36KGttlfuJBvf3Uea0pp1aYlM5n3051IkiRJF4IgHcmO5czOWk6Edlo3lyVrKjilNctZvo62eXEuS16p5jjlBcyeMpdi2mDdAqbPXMR5sWM5s7OWE6Gd1s1lyZoKjlNewOwpcymmDdYtYPrMRZzaRhbcPpUl79F26+aS+WABFUiSJEmKDjX84e2XaSm1Rywf6z2WiXfS3Ksvs76UE9u8nhc208zUKRNI5sx69vGFbKINfr2QHxeUcyI1v/5XHvrmSnbQHuWs/8e/5eFVO2mL9Svy2UTbvPb486yvQZIkSTrvgnQgG1fMZyVNhQg/nM/Wynrq6+upr6+nqrSIRd8I0dLiTSW02bgc8jbuo6q+nvr6KsoKc8kI0UKEJetKaGnjsrksjtBcKMyclVvYV1tPfX09Vbs3kPeddBpEIhHOh40r5rOSpkK
EH85na2U99fX11NfXU1VaxKJvhGhp8aYSWtq4bC6LIzQXCjNn5Rb21dZTX19P1e4N5H0nnQaRSIRTiTyby+x1NDcuh7yN+6iqr6e+vop9G/PIGUczkafmsmQTkiRJkqJAza//jYVPcpyxVw7hE4mMvflbJNPUa7y8ficnUvJGPiU0lcmEcYl8bMBkfhjZzDuR/+Lxm2lhIo9v3Mw7kc28E9nMO9+9ilNKmcj3nn+JDTs2805kMxs2Psk91yTT0pr1W6ikhZq3eOahhWyiueRrvsWP/ruYDZHNvBPZzIaNeTx6z1iSaaqc/Hvn8XwpbTYq+594bsuveSeymXf++F8s/X8mkEwLu1ay6V0kSZKk8y5IB5L2nSLKSorIX76IWXeFSZ2Sy9OPZpCSwKfiL00n55FcwrRQto8K2iA0i7VrFpE1Kol4GsQTun4OeU9m01JkexkVNFVMwRMbaS6d3FX55E5JJSmGj8Unp5G1sID874U4X9K+U0RZSRH5yxcx664wqVNyefrRDFIS+FT8penkPJJLmBbK9lFBU8UUPLGR5tLJXZVP7pRUkmL4WHxyGlkLC8j/XohTKyV/2UqaCeWQv3oRWaOSiKdBPEmjslj0bB6ZNLWR+T8rpBpJkiRJF6S6Gip3beW1JY9w9+SFbKKFft9iwrg4GsX9xVim9qOZ136+nh20VML6FSU0M3My1/bmLJjAo8t/yJ3XXExcDB+LGzCB++ffzwRaKC3nQ5qrXPs8CzfTTPK0fyLv+fuZeHkicXwibsBVTP2HJ/nRg1fR3GssfHY9NZxa8swlLJ03mdTecXwsLpmx9/8TD3+dFsrZ/UElkiRJ0vkWpCOJiSd0ZToZt+cwf+latqzMJoUTiO9GEi2UV1DNqYUfzSGcwHGS0sJk0kJ5BdU0samI5yM0d9dscsbFc7wkMr6VSwbnSUw8oSvTybg9h/lL17JlZTYpnEB8N5JoobyCaprYVMTzEZq7azY54+I5XhIZ38olg1PYVkzBGppJ+242Gckc79JMcr5HM5FlxZQgSZIk6XxbOHkEw0MjGB4awfDQCIaHRjB80NWMG/mX3DtnJZtoKZnJ87IYG8cxcWOZ9O1Umil8mfWlNLd5PS9spolk7rlhFHGcBTP/hptSOF7KZYyihVfLKaepcl5btZLmJnD/dycziBOJY9R35/K9ETRTvvB1NtVwCmO5/xtjiaOlRK68ZiItlR+sQZIkSTrfgnQW1RVE3i1m5bLZzJg2l5WcnvTUFE4oPp54WlexvZSNNJd143iSOInLw2RO4sJTXUHk3WJWLpvNjGlzWUnrKraXspHmsm4cTxIncXmYzEm0qvr3G1hJcymDBnBi8QxLy6SZyAZKdiBJkiQpyox68Ec8dnMyLaVOmEwqTb3Gy+t30tSOTa9TQhP9sphwTRxnw6TUQcRxIskkX0frav7AltU0d91ExqbQilTG/mUyzb3Apnc5hau4LIUTio+PQ5IkSboQxdBRlZdQ/HoR+WsKKH69iMLNET67EN3iObHQAIbRuuoDZbQ0bHCIkxtAyihgDedXeQnFrxeRv6aA4teLKNwcoT2qD5TR0rDBIU5uACmjgDWcVEV5KS2tzBpAIIs2WknpDmAQkiRJkqJBv7Hc+Q9z+PaUIcRxAiPGctMIKNnMp15bu4nyr19MMg12sn71eppK/fa1jOLsSL6oJycWR3xPWre3nJ20MPYyBtG6voNHAS9zTDm7P6gEEjmpfvHEcWLJAy9DkiRJuhDF0NFEClnw3fuY/2wJEc608aSEOIfi6ZbI+RMpZMF372P+syVEOJfi6ZaIJEmSpM6s3xDGpvaFAalMum4SkzKuIjmOVqQyccYEfvDAa3xq9RrWl01m8gCgdD35r9JEKjdNSOVs6dsnkQtB+cEaIJGTGnsxfZEkSZKiSwwdybblTE+fzvIIzYVSCd+YQca140lLS2N0QjHZQ6ezkihQy/mxbTnT06ezPEJzoVTCN2aQce140tLSGJ1
QTPbQ6azkDKtFkiRJUidwf/5m7rmaM2LQ2IlM4DVeo9HLrFlXzuQpyex4I5/1NHHd7UwcQYd3ce9EJEmSpI4mhg6jlCUzp7M8wjGhLJ5+cTHZo5JoZkcx50VMPC1tKI3AuBAnVsqWdZxh1VRVcgqlLJk5neURjgll8fSLi8kelUQzO4o5pZh4WtpQGoFxIU6slC3raFV8wgBamvN6PblfQpIkSVJnlTKJqV9/hNf+nU+tKVhP+ZRRrF+9nqYmTBnLIC5QvZO5mBbW/4EdXMUgTm5H6SaaS6VvnzgkSZKkjiZIR/FeIctfpJnwo7lkj0riONVVVHDuhYaOJo3mVq4pooKTeK+QlWs4PW+XEuFESti4hta9V8jyF2km/Ggu2aOSOE51FRW0LjR0NGk0t3JNERWcxHuFrFxDq5JS0wjTXHFJKZIkSZI6s0TGfiWTZlavYf2r68l/lSYmMHHsxVyw4i5j2M009+rLrC/l5GreYn1BOc30u5bUoUiSJEkdTpCOojxCIc3tq6jgeNUUP7uEQs6DUeOZGqK5ZfNZvK6a45Wy/NG5FHCaXt/AlgMcp+LFPOZvonXlEQppbl9FBcerpvjZJRRyCqPGMzVEc8vms3hdNccrZfmjcyngFC4fTcYomil8ZDEFFUiSJEnqxBJvmMydNPUyC//Hj1lPE1/P5CspXMCSueqGCTT3GgufyGcHJ1LDpsWPs3AzzSR/fSyj4pAkSZI6nCAd2MYHc7jv2Y1U1PGx6m3FLPnuZDIfKeb8SCfju2k0V8zcKVOZ90op1XV8rHpbIQtuu4HpP4nQJoNSGR2iucgCps9cwsZyPlEdoXjZfWRkLCBC+218MIf7nt1IRR0fq95WzJLvTibzkWJOLZ2M76YCfrkVAAAgAElEQVTRXDFzp0xl3iulVNfxsepthSy47Qam/yTCqaWROTODZiILmHzjfSxZF6GaP6uuoGTVXMYPHc8Nd89mwbKVFL8boaIaSZIkSR1R3Cgm3J9MUztKd9LU1K+MJZHWJHPxUFrYyR9+V8nHDpazdXslZ9Ogm/+GO/vRTPmKh5g+dT75myup4RM1ZW/x/N/P5G8ff4vmJvLgXWOJQ5IkSep4YriQrJjOgMB02uXRIuofTocr08gBFtNUMYuzRrM4iwtG2l255DwxmcURjokUMDc8hLmcrlTS7wrB9yM0FfnJDEb/ZAbtdmUaOcBimipmcdZoFmdxWtLuyiXnicksjnBMpIC54SHM5fSk3D6XOU8WMG8Tx6xbzIz0xczgBN4rpnDZAhqE7s2n5MkMkpAkSZLUscQx9qtZJC9cSDkncjsTr0nkVOLik4Fyjinhx3+Vzo/5swfzeOe7V3HW9BjLXfMm88I38ynnmPI3nuHBic/QumQmP/U9Jg9AkiRJ6pCCdBRJGWQ/kU6bhMJk35VBMyu2UMo5kJzB/FW5pHNqoW/kkfcwbRBP+LbZpNM26d/IIkwrkjLIfiKdNgmFyb4rg2ZWbKGUFpIzmL8ql3ROLfSNPPIe5tQS0pn7VC7hEO0zLpeVj2eQhCRJkqQO6epruWsEJ5R8/0TGXsQpDbk6k2RaUbKDcs6u5JsfZdn8ySTTHslM+IeneGzKxUiSJEkdVZAOJO3vClj7cJgQrRiXQ94r+Tx9f5g0miqgaBPnRPy4ORT8cg7hECcVunk+BU9mkdKVthk1i7yf55BOa0JkPLGBgkcnk0Tr0v6ugLUPhwnRinE55L2Sz9P3h0mjqQKKNnGc+HFzKPjlHMIhTip083wKnswipSttEj9uDvmv5DHr+hBtkTJtERtenEN6ApIkSZI6rFTGTkvleMlMvW4UcZxa3DVTeXBaMid1BKo52+IYcsc/8cJLc5h8ZTKnlDKR7/3nap66J5U4JEmSpI4rSIeSRPjRtZRsfJ7592aSfjl/lkL6tBwWFWxh3+uLyLoyHkaNZ2qIJjYyf3kh1ZwbSZNyWVuyhecfzyFzXAqfSCF9Wg6LCrZS8vNZpCVUsK+
MFlIIJXNCKTcvoqisiLxHswmPCPGJEKnXZ5LzeB5F20vJ/7s0kmJogyTCj66lZOPzzL83k/TL+bMU0qflsKhgC/teX0TWlfEwajxTQzSxkfnLC6nmeEmTcllbsoXnH88hc1wKn0ghfVoOiwq2UvLzWaQlVLCvjBZSCCVzQvFXZjG/sJSy4jzm35tJeFwKx6SQPimTnMfzKNpexdaf5ZCWhCRJkqQOLvUrtzOBFvplMeGaONrmYib/f6tZvfR+pl4zhGT+LOUqxn79Th69YxR9OTcSr7qdx//rJV59+Yc8OjOTsVdfzKdSrmLs1+/k0eU/Z93rP+TOsYlIkiRJHV2g/iPoAhVh+W0DmL6CJnIpqp9DOh1ZhOW3DWD6CprIpah+DunoXHvshXtoEN+1B5NHZSNJkiRFhdKV3Jv+CK9xTOojK3huZiqSJEmSLgwbthVSunszDf5qTA6f7z+StohB51Ax84bOovDa8WSkjSZ1TBqjr0whlBTPCVVvYcvrNDcthRSiTTHzhs6i8NrxZKSNJnVMGqOvTCGUFM8JVW9hy+s0Ny2FFCRJkiSpLWpY/5OFvEZTqdw0IRVJkiRJ0S8GnUMhUr5QTOGyYgqXccy0RWx4Mpu05Hg+UU3Fto3kPTqDeRGaSUtPJUS0CZHyhWIKlxVTuIxjpi1iw5PZpCXH84lqKrZtJO/RGcyL0ExaeiohJEmSJOkEamogLo6PHdzJ+iXzePDJcpoZMZmxI5AkSZLUAcSgcyiF9JszYFUBzay4j9Er7uPUMsi5OY3ok0L6zRmwqoBmVtzH6BX3cWoZ5NychiRJkiSdSHnBQ1x378u0ZlLOTaQiSZIkqSMIonMqZdpccsdxGkJkLl1E9uVEpZRpc8kdx2kIkbl0EdmXI0mSJEknlNh7EK26ejb3TElGkiRJUscQROdWQjpzXtzAomkptFkoleylheTdlULUSkhnzosbWDQthTYLpZK9tJC8u1KQJEmSpJOJS+jJSV19O0/9y52kIkmSJKmjiEHnXlIaOT/byvR3CylYU0Dh6o1sKS2k+D0+FRoRZtiYNDInTWXqlHRC8US/pDRyfraV6e8WUrCmgMLVG9lSWkjxe3wqNCLMsDFpZE6aytQp6YTikSRJkqTWDb6SO68ZwgtvbKWcBhcz6uYJTMq4iZtuvorkGCRJkiR1IIH6jyBJJ/HYC/fQIL5rDyaPykaSJEmSJEmSpDNhw7ZCSndvpsFfjcnh8/1H0hZBJEmSJEmSJEmSpCgRRJIkSZIkSZIkSYoSQSRJkiRJkiRJkqQoEUSSJEmSJEmSJEmKEkEkSZIkSZIkSZKkKBFEkiRJkiRJkiRJ/4c9+AGIgjD4///mlOP8Q6Kop5QKZh7WQHSSNMqQ8ulcz6PMPRmVz6O59Qyb30f39JuRc8/DljV1T9/h1tSevjH9psmjy8BVXuPppL5R0BEitIJEwciLK09PcHoSyE9al3eICv4/+7xeEiQMiIiIiIiIiIiIiIiIiAQJAyIiIiIiIiIiIiIiIiJBwoCIiIiIiIiIiIiIiIhIkDAgIiIiIiIiIiIiIiIiEiQMiIiIiIiIiIiIiIiIiAQJAyIiIiIiIiIiIiIiIiJBwoCIiIiIiIiIiIiIiIhIkDAgIiIiIiIiIiIiIiIiEiQMiIiIiIiIiIiIiIiIiAQJAyIiIiIiIiIiIiIiIiJBwoCIiIiIiIiIiIiIiIhIkDAgIiIiIiIiIiIiIiIiEiQMiIiIiIiIiIiIiIiIiAQJAyIiIiIiIiIiIiIiIiJBwoCIyBn07BFKu9bjLYiIiIiIiIiIiIiIXCitx1vwCe0RSlcZEBE5g75h19Dui9ZjtLR+gYiIiIiIiIiIiIjIhXD0i8P49DGG01UGRETOYEDvwfg0HnUjIiIiIiIiIiIiInIhNB09gE//PoPpKgMiImcwpN9wfNyHnYiIiIiIiIiIiIiInK+mowfwfnGEdoPCowjtYaSrDIiInMGIyBvwaTi0FxERERE
RERERERGR8/XpoTp8RkSOpjsMiIicwajBcYT17EW7zxrraTp6ABERERERERERERGR81HvrsZntDme7jAgInIWcdfdjM/uzyoQERERERERERERETlXzoO78Rz5nHb9ew/i+kE30R0GRETOYvzwSfjs/qwCz5HPERERERERERERERE5F1WfluLz7ejb6S4DIiJnYb7mOhKGJeNTWf8WIiIiIiIiIiIiIiLdVeV8l4N/ddEuolckt4ycQncZEBHpghTLNIw9TbT7rLGe9z8pQkRERERERERERESkqz717OEv+4rxSYmdzrkwICLSBeGmCKw33YtP9afv8VHDe4iIiIiIiIiIiIiInM3nTfso2W3DZ+yw7xB37UTOhQERkS4aO+w73DJyCj6V9UV8sK8YEREREREREREREZHTcR7czVvVebQeb6Hddf1H8g/x/8y5Cmk7ARGRbni54v+y4+MifKL6X8/YYbfRO+waRERERERERERERER8/rKvmCrnu/gMDr+W+yf+K+GmCM5VSNsJiIh002t/+W/erbXjYwgxEBt1M6PMCYT2MCIiIiIiIiIiIiIi31wfu6uo/vQ9Go+68Rk2YBTfH/8vhJv6cT5C2k5AROQcOOoKsb2/EX+GkB4Mj7QwNGIkg665ltAeYYiIiIiIiIiIiIjI1c992EmDZy/1Bz7ir8cO4W/8iNu4O24WF0JI2wmIiJyjz5ucbK/Op7qhnM70CbuGXsZwQnsYEREREREREREREZGrS+vxFrxfHKHJe5C2tuN0NLDvUFIs0xgzdDwXSkjbCYiInKc9n39AaV0h1a6diIiIiIiIiIiIiMg325B+wxk//Da+PWISF1pI2wmIiFwgh466+aihglp3FU5PHU1eDyIiIiIiIiIiIiJydQvraWJIv+EMH3ADNwyO49r+MVwsIW0nICJykRxrOcphbyPNrccQERERERERERERkatLT0NPehv70ifsGi6VkLYTEBEREREREREREREREQkCBkRERERERERERERERESChAERERERERERERERERGRIGFAREREREREREREREREJEgYEBEREREREREREREREQkSBkRERERERERERERERESChAERERERERERERERERGRIGFAREREREREREREREREJEgYEBEREREREREREREREQkSBkRERERERERERERERESChAERERERERERERERERGRIGFAREREREREREREREREJEgYEBEREREREREREREREQkSBkRERERERERERERERESChAERERERERERERERERGRIGFAREREREREREREREREJEgYEBEREREREREREREREQkSBkRERERERERERERERESChAERERERERERERERERGRIGFAREREREREREREREREJEgYEBEREREREREREREREQkSBkRERERERERERERERESChAERERERERERERERERGRIGFAREREREREREREREREJEgYEBEREREREREREREREQkSBkRERERERERERERERESChAERERERERERERERERGRIGFAREREREREREREREREJEgYEBEREREREREREREREQkSBkRERERERERERERERESChAERERERERERERERERGRINETEZEL7EhDAwf+Uknj7hr+6nRy7OABWrxeaENE/ISEhNCjVy9MAwbQ59rr6DdqFAO+FY8pMhK5chxvacG9sxzPR9Uc/ngvR/d/zheHD9PWehwRCWQIDcXYrx+9zUMIj46h/5gbiRg9Gum+xj27OfjBX2isreVIw6c0HzpEa3MzInLhGXr0IDQ8HNOgQYQPH0GEJZaBCeOQy+t4SwvuneV4Pqrm8Md7Obr/c744fJi21uOISCBDaCjGfv3obR5CeHQM/cfcSMTo0cj5Od7SgntnOZ6Pqjn88V6O7v+cLw4fpq31OCISyBAairFfP3qbhxAeHUP/MTcSMXo0F1NI2wmIiJyvtjbq/2zD+eYbeD6qRkT
O3YBvxRF1ewrXpqQil8+B9yvZt/11Gt4u4nhLCyJybnoNGsSQWycx7M4p9BpsRk7v2MED1Bf8mU/fepMjn36KiFw+PUwmhn7nVq69404iRluQS+fA+5Xs2/46DW8XcbylBRE5N70GDWLIrZMYducUeg02I1134P1K9m1/nYa3izje0oKInJtegwYx5NZJDLtzCr0Gm7nQQtpOQETkPHy87RX2vPQixw4eREQunN5DhzLye//ItZNTkUvHU13F7j9uZn95GSJyYQ2fejejZt5LaN9w5KTjzc3UbMqlNv8lROTKY56YxPX/OJPw6Bjk4vFUV7H7j5vZX16GiFxYw6fezaiZ9xLaNxw5PU91Fbv/uJn95WWIyIU1fOrdjJp5L6F9w7lQQtpOQETkHDTt3UtVzrMc+OAv+DP27cvAG26g37Dh9B1sJqxfP3oajRASgoj4aWuj5ZgXr+cQh10uPB/vZf+uj2jxevE3eEIiY37wEKaBg5CLa9cL69nz0ot0NOD66+kfHUP40Ch6DxhAaO/ehBgMiEig4y1fcKzpMEf276dx3ye49+zmcEMD/ozXXEPsnB8w9LZJCHzmeJeqP/wfjn7+Of76DBrEgOtH0e+66+gzcBDG8HB6hIYiIhdeW2srzUeOcPSAm0ank4O1tRysq6WjG9LvZ+T370EuvF0vrGfPSy/S0YDrr6d/dAzhQ6PoPWAAob17E2IwICKBjrd8wbGmwxzZv5/GfZ/g3rObww0N+DNecw2xc37A0NsmIafa9cJ69rz0Ih0NuP56+kfHED40it4DBhDauzchBgMiEuh4yxccazrMkf37adz3Ce49uznc0IA/4zXXEDvnBwy9bRIXQkjbCYiIdFPD229R+buVHG9pwaffsOFcNyGRQWPGICLnrqFiJ584HBx2NeBj7NePuP+1kIFjE5ALr7mxkYrf/gb3znJ8ehiNDJuYxNCEcYSFhyMi56bRuQ/ne+/RUFmBv5i07zH6gX/mm2z3i5upyX0Bf4PH3EjUtycQMXw4InL5eD0enDvKqC8ppu34cXzME28hfsFPMISGIuevubGRit/+BvfOcnx6GI0Mm5jE0IRxhIWHIyLnptG5D+d779FQWYG/mLTvMfqBf0b+prmxkYrf/gb3znJ8ehiNDJuYxNCEcYSFhyMi56bRuQ/ne+/RUFmBv5i07zH6gX/mfIW0nYCISDd88j9/5i/PrManp6kXo+64kyFjxyIiF84njnfZ/T8FtLW14ZPwyCLMSbcgF453/37Klj1B0946fKLGjScmZTKhvXohIhdG4yefsHu7nUP1H+Nz3R13clPGj/kmqlqXw96X/4RP+JAhjEy9g/7RMYjIleNYYyN7Crfjer8Sn/6xYxj/2M/o2bsPcu68+/dTtuwJmvbW4RM1bjwxKZMJ7dULEbkwGj/5hN3b7Ryq/xif6+64k5syfsw3nXf/fsqWPUHT3jp8osaNJyZlMqG9eiEiF0bjJ5+we7udQ/Uf43PdHXdyU8aPOR89sk5ARKSLPn3rTd7//e/w6R8dTfy99xExYgQicmFdc+21DLTE0ujcR/Phw7RreKeICEssvYcMQc5fq9fLe0/8kqa6WnzGTJvOiORb6REaiohcOGHXXMPQsWNpbW6mcd8ntGus3cMXTU0MGv9tvkl2bdxAXX4ePlHjxhM38156RfRHRK4sPcPCGGSJxdinD+6aGtp59+/nUM0urk2ZjJybVq+X9574JU11tfiMmTadEcm30iM0FBG5cMKuuYahY8fS2txM475PaNdYu4cvmpoYNP7bfFO1er2898QvaaqrxWfMtOmMSL6VHqGhiMiFE3bNNQwdO5bW5mYa931Cu8baPXzR1MSg8d/mXPXIOgERkS5o3LObsicex2dQ7BjiZqbT02RCRC4OY58+mG+6icZ9TryHPLRz7yxn6HeS6dm7D3J+KrKf4sD7lbTrERrK2PvuZ+BoCyJy8QwYORJDaCgHa2tpd6hmF6F9+hAx2sI
3wb7tr1P9f9fiMyL5VkbdOQURubKFD42ir3kIn33wF9od/ewzvPv3MzjxZqT7KrKf4sD7lbTrERrK2PvuZ+BoCyJy8QwYORJDaCgHa2tpd6hmF6F9+hAx2sI3UUX2Uxx4v5J2PUJDGXvf/QwcbUFELp4BI0diCA3lYG0t7Q7V7CK0Tx8iRls4Fz2yTkBEpAvK/3M5Xrebdv1jRhI3815E5OIz9OjJoFgLB/bsofnwYVqPHeOoq4Ght96GnLu6P+Wz99WX8Ym7N53+0TGIyMXX77phtPN8vJd2+8t3MHhCImH9B3A1O+JqoOxXT9DW2kq7626eyPWpdyAiwaF3ZCR9Bg3i8w8/pF1TXS29Bg7impiRSNfV/Smfva++jE/cven0j45BRC6+ftcNo53n472021++g8ETEgnrP4Bvkro/5bP31Zfxibs3nf7RMYjIxdfvumG083y8l3b7y3cweEIiYf0H0F0GRES6oDbvJTwffUQ7Y5++jPmHaYjIpdPDGEbs308jJCSEdp+VOti33Y6cG+/+z/low/P43HCXlQExIxGRSyf6tkkMHnMjPrteWM/VbtcL6zne3Ey7ASNHMurOKYhIcBkUO4aY21Pw+WjD87QcPYp0jXf/53y04Xl8brjLyoCYkYjIpRN92yQGj7kRn10vrOebxLv/cz7a8Dw+N9xlZUDMSETk0om+bRKDx9yIz64X1nMuDIiInEXL0aPs2fJHfEbdOQVj376IyKXVd/Bgrr9zCj57Xvojcm72vLSFttZW2g0cbeHab09ARC69UVP+jp5hYbTbv7McV0kxV6sD71fS8HYRPqOm/B0iEpxGJN9KxPARtGtuPERt3haka/a8tIW21lbaDRxt4dpvT0BELr1RU/6OnmFhtNu/sxxXSTHfFHte2kJbayvtBo62cO23JyAil96oKX9Hz7Aw2u3fWY6rpJjuMiAichb1r22j5egR2g0YeT2Db7oJEbk8rku8mb7mIbQ78umn7Cu0I91z7OAB6v9swyf6tkmIyOVh7NuXEbfehs/Htle5Wn1s24bPiORb6R05EBEJXtG33YbPx9te4fgXXyBnduzgAer/bMMn+rZJiMjlYezblxG33obPx7ZX+SY4dvAA9X+24RN92yRE5PIw9u3LiFtvw+dj26t0lwERkbPYV7gdn+tuvhkRubyuS0zEx/lGIdI9zjcK8THf9C36ms2IyOVz3c0TCe3Vi3YH3q+kaW8dVxuvez+ukndoFxISwnU334yIBLeIEdEMGHk97VqOHsX5ZiFyZs43CvEx3/Qt+prNiMjlc93NEwnt1Yt2B96vpGlvHVc75xuF+Jhv+hZ9zWZE5PK57uaJhPbqRbsD71fStLeO7jAgInIGh2pq+Ou+T2jXe+BABoy8HhG5vIbEj6WnyUS7A+9X4nW7ka5zvVuCz5D4eETk8goJCcEcF4/PZ++WcLX57N138THHxRHaqzciEvyGxMfj89m7JciZud4twWdIfDwicnmFhIRgjovH57N3S7jaud4twWdIfDwicnmFhIRgjovH57N3S+gOAyIiZ3Dg/Qp8Bt4wGhG5Mgy8YTQ+B96vQLqmubGRQ7s+ol1o7z70jxmJiFx+A0db8HFXVnC1cVdW4DNwtAURuToMHG0hJCSEdu6KndDWhnSuubGRQ7s+ol1o7z70jxmJiFx+A0db8HFXVnA1a25s5NCuj2gX2rsP/WNGIiKX38DRFnzclRV0hwERkTM4VLMLn4gRIxCRK0PE8BH4HKqpQbqmcXcNPhHDhyMiV4aI4cMx9OxJu0O7dnG1OVTzET4Rw0cgIlcHQ8+e9Bs+nHbHW1o4tLsG6Vzj7hp8IoYPR0SuDBHDh2Po2ZN2h3bt4mrWuLsGn4jhwxGRK0PE8OEYevak3aFdu+gOAyIiZ/DXfZ/g09dsRkSuDH3NZnz+uu8TpGv+um8fPn0HD0ZErhx9B5tpd7zlC458+ilXiy8OH+bYwYO069V/AD1NJkTk6tF3sBm
fvzr3IZ376759+PQdPBgRuXL0HWym3fGWLzjy6adcrf66bx8+fQcPRkSuHH0Hm2l3vOULjnz6KV1lQETkDLwHDtDO0LMnxj59EZErgymiHz7eAweQrvEePICPqV8EInLlCOvXD59jBw9wtTh28AA+pn7XICJXF1O/fvgcO3AA6Zz34AF8TP0iEJErR1i/fvgcO3iAq5X34AF8TP0iEJErR1i/fvgcO3iArjIgInIGLUeP0q5nmAkRuXL0DDPh03r0KNI1rUeP4tPDFIaIXDl6hoXh03L0KFeLlqNH8ekRZkJEri49w0z4tBw9inSu9ehRfHqYwhCRK0fPsDB8Wo4e5WrVevQoPj1MYYjIlaNnWBg+LUeP0lUGRETOpK2NL4UgIleSkBB82trakK5pa2vDJyQkBBG5coSEhHBSG1eNtjZ8QkIQkatNCCe1tSGda2trwyckJAQRuXKEhIRwUhtXq7a2NnxCQkIQkStHSEgIJ7XRVQZEREREREREREREREREgoQBERERERERERERERERkSBhQERERERERERERERERCRIGBAREREREREREREREREJEgZEREREREREREREREREgoQBERERERERERERERERkSBhQERERERERERERERERCRIGBAREREREREREREREREJEgZEREREREREREREREREgoQBERERERERERERERERkSBhQERERERERERERERERCRIGBAREREREREREREREREJEgZEREREREREREREREREgoQBERERERERERERERERkSBhQERERERERERERERERCRIGBAREREREREREREREREJEgZEREREREREREREREREgoQBERERERERERERERERkSBhQERERERERERERERERCRIGBAREREREREREREREREJEgZEREREREREREREREREgoQBERERERERERERERERkSBhQERERERERERERERERCRIGBAREREREREREREREREJEgZEREREREREREREREREgoQBERERERERERERERERkSBhQERERERERERERERERCRIGBAREREREREREREREREJEj2R87N/Cz8c9H2eoxNDfsbrtUtJNdEl7zwewnf+nS4b8q1UbhwawZik7/Ld+7/Pd2MjOJ13Hg/hO/9OgKXvtPGzJESko8ocTLNX4S/tydfItUYiInI5lDw7gdtX02XRcYlED4khKTmZ9EnJxEZwebW6KVyfj8c6lzQzl4yn2s7aLTZsO3ZSWOOmnXlUImNutJCWNJV0q4XqZydw+2r8pLJ22wrSzYhcmVqbqCorovBtB3kfVPChoxYXfxMdl0hCbBzWpDuYmmzBbOTq0+ym0PY8ubYKCosrqKNdFBOTYhkXl0zajKmkmKtYNn4uWfiZspS9y62YkQujiSrbcxQOX0jGjZzCZVvEiMV2/GWtKyUzDpFLzrP9cYY8kk9nzHNXUz0/ERNXALeD7C1N3PdQKmZOVfLsBG5fjZ9U1m5bQbqZ4OR2kL2lifseSsXMqUqencDtq/GTytptK0g3IyKXWMmzE7h9Necl7cnXyLVG4uOyLWLEYjv+staVkhlHgJJnJ3D7avyksnbbCtLNiGBAzktDwQae4zQansP+jpeLpeF9O/aCLfz+8R9y95j+jJz7HB8eRkRERL7h6iodFBZsYlnWAhJS78L6lI2qI1wWLsd60mfchXVlFV4ulWZKcuYx9r5FZG62U1jjxsdV46Bw63oWbq3Cg0gwaaLKlo3VOpmEHy1h4bp8Ch21uDiprtJB3uYcMh55gBF338OcdQ5crVw9nHYy7rkLa9Z61u7aFa8AACAASURBVBZXUIePk5JiO2uefRxbLXKReWpsZP7oH0lYvB5PKyJXODc2Wz6n48p3UNzM5dXqpnDdImKnzCOzppmrXqubwnWLiJ0yj8yaZkRERM6VATkPtbzy/BZOr4EnNtvxcGnU/uGHpN7/e3Z4EREREfmKm8INS0iYtwKbk0v
H7SD70WmM+FE2efVcWtWbWPC0AxdnYIliCCJBoqmCZT/6RxIWr6fQTde4a8ldOY+bH86hpImrQBO2dYtYW88ZJBJ7rRG5SJpqyX1qHmNnLiHb4UYkKNS/RW4Bp+fOIa+4icvF5VhP+oy7sK60U8fVz+VYT/qMu7CutFOHiIjI+TEg567GzgvbOLPVW7Dv55Jp+NN8fvybHXgRERER8VO5ibTHcig5wiXgJnfFPDILnFwOrtoKyjmzlGHXYkIkCHgcZM2fS5bDzblwOVZx+/wcSo4Q5Gop38xZWIg2IxdJSe49zNngwIVI8Khz2LBxZmvsDjxcBi4bC36UTV493wwuGwt+lE1ePSIiIhdET+Sc7Xjp99jx9whLf2lnyb/v4KTnePX//SczvhfBuVj6Ths/S+IUXk8tO176PUsWP4W9gQDvLP49rz70f5gxEBEREblKZa0rJTOODprxNDXRUOUg74+ryCpwEqByFTOfjaN6QSImrl6fOnfSUVrWVtZOi8LECc1NeAhH5MrnJPdX81hWSQeRpDzwML+4L5WxUeGYOKG1GVe9g42rl5NZ4CRA5SoW5Sbz2lwLJoKUez9VdGCZi231w6RE8CVvUzMmIyfEk1lWSiZyuZitK/BaEbnMqsnb7CDA7IfJKl5FVjUnbS2icGEqaRFc0SY+VIr3Ib4xJj5UivchROQKlbWulMw4zpnZugKvFZHzYkDO0Q7sL+wgwE+/yyP/8ADjCPTcxldp4MIyRcRwy4P/ySsvLeUWOnoO+zseRERE5JvGSER4JLGJVjKX/5HyJVbMBHKtW82aaq5qx1rddJQQE4WJrxjDiTAicsVz2bKZU0CgyESycv6I7ZHpTIwKx8RXehgxRyezcPkfKV9ixUygkqc3YfMQvFqa8dLB8JGMieBrpnAjIiJfq3aQW02AhRNnYf17C4HyyS12IyIiIsGlJ3JOvNtf5D/L8TOEn039DqaE/jyQADvKOWnzBl6puZ8fjOKCMyU9wtKfLuGOXxPgxfdrefofxnGuPDV2Xn1pCy8WlLGj4B1q8RnCmMk3Mib2Fr77Dw/w/SljiOiJiHSD11VBns1GXkktVbscVLn5ShQTk65ljGUyKal3kB4XiT+vYxWWH+Xgws/s1XgWJGLidJrIy5pM+lb8zMJWvJAUI6dqdVNSsI2N2+3YCiqoo10UE5MSucWaSsbkZKLDOb3KHEyzV3FSKmu3rSC9tYhl/7We3CIHVe5IYhNvJT19FhmTY4jgJE+9A5t9O3klVZQXV1CHTySxiSOJjY7DOmkqaUkxRPRARM7ISOyMR3mm2kHaZjcnVfCbAgcZlkRMdKYZV6WdjS/bsdXV8qGjFhdfGRZPyrUxJExK5HtTrEyMJJDLRvrUJeTRGTtzpk5gDl+Zl4P3oXgCNeOqtLPxZTu2ulo+dNTi4ivD4km5NoaESYl8b4qViZEEctlIn7qEPDqXNXsCWXxlylL2Lrdi5tx56orIy7eT+0EFHzpqcXFCZAwp4xOxTrZyX2o8ZiOdcJP76F3MKeCkpJ9TtWo60ZyqPOcukp52c1IkWeteIzOOU3jeXMGQhZs4ycKyjRtYaEGCUbOD1U/ZCRRJxs9XkJkQzukZiZ3xKM9UO0jb7OakfNa++QPSpkVxes24Ku1sfNmOraqKwkonXxoWT8q4eNKnTCctKYaIHpxBBcvGzyWLk9KefI1cayS0uikp2MbG7XZsBRXUccKweNKSUkmfMZ00SzgdlTw7gdtX07mCJYwoWIJP1rpSMuM4oYJl4+eShZ8pS9m73IqZ02h2U2h7ntxXitjmqMVFJLGJt5L+vek8OCUecw+gMgfT7FWclMrabStIN3OSy0b61CXk4WdeDt6H4ulMybMTuH01AbLWlZIZx0kuG+lTl5DHSVnrSsmMrmbt08/xh2I7JfUQHZeKdeo9PHZPIuYe+GnGVWln48t2bHW1fOioxcVXhsWTcm0MCZMS+d4UKxMj6aCCZePnkkXnsmZPIIu
vTFnK3uVWzIDLtogRi+34y1pXSmYcZ+apxfZmPnm2aop3Oahyc0IksYljSflOKvdZU5loNnImLtsiRiy2c9LDvFE2l4mAp76IvC12coveorDGDUQSm3gr1rut/MSaiNmIXDWaKSx4nnL8RM7FOt5IQv+pJFBNOSflvfIWddbpRNMdTdQV2VlTYKP8gz0U1rhpZx6VyJgbLaRPmU5aUgwRPQjgsi1ixGI7nSpYwoiCJfhkrSslM44vlTw7gdtX4yeVtdtWkG4GqteTdF825ZyUsGADxbMtnJ6Ttf9rGhlFnGRZSPHGWSTQQWsTVcV21hbYKK/dR2GlEx/zqETGxMSQlJzMnMnJRIcTwGVbxIjFdjpVsIQRBUvwyVpXSmYcXyp5dgK3r8ZPKmu3rSDdzBk046q0s/FlO7aqKgornXxpWDwp4+JJnzKdtKQYInpwBhUsGz+XLE5Ke/I1cq2R0OqmpGAbG7fbsRVUUMcJw+JJS0olfcZ00izhnI2n3oHNvp28kirKiyuowyeS2MSRxEbHYZ00lbSkGCJ6IHLVctkWMWKxHX9Z60rJjOO8uSptbHzZzkvFdkrq+VJ0XCIp462kzUjFOiycrvDUO7DZt5NXUkV5cQV1+EQSmziS2Og4rJOmkpYUQ0QP5DLoiZwDL28XPEcDfob8gNRbTMA4Uv95HJTv4KRXeWF7LT8YFcOFZ+LGcTOALfhraPFyTlpq2bLoh/z4N3Ya6EwDH25v4MPtdrasfoIfjprB05s38OMEEyJyNk0Url7E7GcduOiMk5JiJyXFDtauW0HWlEXk/sdMEnrzJVPircyLzCHLzUnriiiel0iKkc41VVC4lQDmuckkGTmFp3w9Gf+RTV49HTgpKc6npDif7N8lsvCRRWRZYzDRRZ/amPPTJeS6+YqbKkc+WQ4nQ/JXM2cY0OokL/txFmxw4KIzbqocbqocDvI255AxLJXsFUvJsBgRkTMJxzrjn0jYnE05J7nyK9k5P5GJdOBxkPXoEpY53HSqvoLC+goKi/PJXrGKtEW/Zm26BRMXgMdB1qNLWOZw06n6CgrrKygszid7xSrSFv2atekWTFxiTjuZv8omu8jJKdy1FBbUUliwiUyiSFv0a9akW4jAXySTJ6dCgZ2vFRdR7JpOtJkOnJSXugnkpnCXk8y4KAI1U75jEwEi7+AWCxKkvGVFLHMTKPlhFk4K5+zCsc74J1Kqiogen0xSfDxJ8TFER4ZzOp4PNrEw6zlya9ycor6CwvoKCreuJyMykYWLf86yyVF03TE8H2wi47EV5NUTqL6CvPoK8jZnMzF9JesWJRPNpeX5YD1zFmRjc+PHTZUjnyxHPlkbZ5L3q0VYuYI0VrBs/lyyKvlaXaWdNZV2TNFvsyzJyJc8DrIeXcIyh5tO1VdQWF9BYXE+2StWkbbo16xNt2DiEmt1kvf0cjLXFVFHR26qHHaqHHbWrIToKYvIXTKThHC6rtVJXvbjLNjgwIU/N1WOfKoc+WT/LpllT/+ahRYjchVo3klhvht/5umJJBkBSyLpFiiv5qQiG4X105kzjC7x1uWzcMHjrK3nFK4aB64aB4Vb15MxbDprVj7KnGgjF5UllYzkbDKK+Fq5zUH5bAsJnEa9g7wiAiRYE0kgkLcun4ULHmdtPZ1y1Thw1TgoLNjEst8lsvDnP2fZpCguNc8Hm1iY9Ry5NW5OUV9BYX0FhVvXkxGZyMLFP2fZ5Ci67hieDzaR8dgK8uoJVF9BXn0FeZuzmZi+knWLkommE61O8rIfZ8EGBy4646bK4abK4SBvcw4Zw1LJXrGUDIsREekiTwXZv1pCZoGTjuoqHaytdLB23SpSHvgJ2fOsxPamc61O8rIfZ8EGBy4646bK4abK4SBvcw4Zw1LJXrGUDIsRubQMSPd53+bVXzXgb8iDqXzHxJfGTX6AcQSyr9rCDq50DWyZ+x2+/xs7DXRRzRbmT/0BL9QhImfUTMmzC7A+68BF19QVrGD6r+y
48InHOstCoPXYypo5Hc+OItbgL5J5tydiIlBdweMkzc0mr54zczvIXnwPdz1bgZeu2Ef28iXkujmVJZmEYZzgJi/rQdI3OHDRRfV2Fs7/JblORORsLPGkRRLI/TrvVBPoSAXLFsxjmcNN1zjJW/GvZBS4OW9HKli2YB7LHG66xknein8lo8DNpeStzME6exHZRU7OzkneigdIyrJTRyDzTcmk4M9OcXUTp3BVYCvmFIU7qnDRURXFLxPAPD2OsUiw2rljPR2lTE4kmi6yzMK2bjVrFsxizuR4YiPDMdG5uoLHSZq1gtwaN2fldpD9yINYcyrw0jWeolVMn7WCvHrOqCR3AQu3OLmUvJU5TJ+Vjc3N6VVuIu2xbAr3c8XIe3o5WZV0YiYpNxn50pEKli2YxzKHm65xkrfiX8kocHNJHalg2cMPkr6uiDrOrq5gBUmzHifPSRdVsTrrQdI3OHBxBu4iMuevovAIchXwlhWxzI2fSB6cOBYT7Syk/L2FQA7WvFlNV3grc7hrxuOsrefs6vPJeOiX5Dq5yKKYencqAaqfx1bJadU5bNjwl0hGqoUAThsZDz3O2nq6xu0ge+ESln3QzKVUV/A4SbNWkFvj5qzcDrIfeRBrTgVeusZTtIrps1aQV88ZleQuYOEWJ6dyk5f1IOkbHLjoono7C+f/klwnItIVTjsZs+eSWeDkzNwUblhCwrwcSo7QCTd5WQ+SvsGBiy6qt7Nw/i/JdSKXmAHpNs+2F3gKf0OYPy0VE19JSOWBBAKVb8BezkXg5YMdW+hoXF8T3eXd/jQ/fr6Bk27hxzlvs6epjba2Ntra2jh6cA+vP/ldhuCn4QUe+OUWPIjI6Xgr17NgdQXd5XrlOTZW87WEpKkkEGhj6U68dKaJ4qJNBLD8E9Y4AlWvZ/aj+dTRdSWrf0rmm02cXTXl1XQq4e8TSQC8jv9mwStuToonIyuHqrdK8ZaV4i0rxfPGVmzzkzHjx21jzn/Z8SAiZ3YtsUl0UE2dq4mTminZsJysSrrJTW7ONso5H82UbFhOViXd5CY3ZxvlXCJOGxn/topCN91St3URs5+twIufYYmkJxFgzY4qvATyVFeQRydeqeDDZgJVV5DnJsB9E8ZiQoKTkw8r6SCSlNgoLjRvZQ6zH82nju5wU/j0T8mwOemKwldslNA1ttV5FDZzaXiKyPy3VZTQBZXrmf3kf3OlKK+uplP3JJMUzgnNlGxYTlYl3eQmN2cb5VwqTnJ/9VOyHG66pT6f9MdyKDlCF9jJfcVNl7jXk/0/TiTYNWGzrSdA5L1MTTTikzBhKgkEKn/ZQTln4bKT8W+rKKEb3DbmPLaeci4uc5KVOfhzs/oNB14646TwfxwESLaSMgw/bnJXLiHXTTdVkJXvwMOl4a3MYfaj+dTRHW4Kn/4pGTYnXVH4io0Susa2Oo/CZgJ4Hf/NglfcnBRPRlYOVW+V4i0rxVtWiueNrdjmJ2PGj9vGnP+y40FEzqya7McWsbaerqtcxcyVRXgI5HX8NwtecXNSPBlZOVS9VYq3rBRvWSmeN7Zim5+MGT9uG3P+y44HuZQMSDd5sP/pOQIMmU9qEn7GMePhVALtYEPBDi40b/FTLPk1p0j91hi668Pi52jAz/fm88iDtxDTl6+ZImJIfexFNjx2C6n3/Jif5bzI6zv2cHDVDCIQkc41UfjyKsrxF8WcrA00FJfiLSvFW/Y2DVtWkBlHB9UU17r5miWVjGQCuPIdFDdzqqYKbJsJkGBNJAF/bnJzsikh0MT0pRTb38ZbVoq3eDvFT85kIv7crFmdTznnykL6BAvtqirzceFn8kwWTosnujdfM4VHkTL316ybG0/KlJlkZq3AtnErDYtTiUBEziySocM5RUNTE19rcrBxdTUBhk1nzcbteMpK8ZaV4i3eTvlTc5lIB9UVVLn4G7OV3LJSvGWvsXYKHaSydlsp3rJSvGWleB+K50tNDjauribAsOms2bgdT1kp3rJ
SvMXbKX9qLhPpoLqCKhd/Y7aSW1aKt6yUN+Zxiqx1pXjLSvGWleJdbsVMd7jJXbmEXDeBhk0ne91WPI5SvGWleO2byVuQSjSBSlb/lKziZk6KIiHZQoCXK9lJoOrq1+ncNoprCFD3lyLK8TcL63gjEqz2U1dMB2OJjuTCaq4g+8lVlBDInDiX3C3b8ZSV4i0rxbMthzUPJGLGn5vcxctZW0+XTUxfSrH9bbxlpXiLX8M2PxkzHbjzKa7haxMfKsVbVop321LS6GDKUvaWleItK8VbVkpmHN1Snr+KNW4CRSaS+dRmGhyleMtK8dg3sDY9nnYut5srXUZyPBGc0ORg4+pqAgybzpqN2/GUleItK8VbvJ3yp+YykQ6qK6hy8ZV4MstK8ZaV8sY8TpG1rhRvWSneslK8y62Y6R6XLZs5r7gJFMWcRTlUvVWKt6wUr2M75b9bSNowAlWuYuazDrx0UWQyWc9sxeMoxVtWSsPGpWTEcQqbowoXEtQ8DmxbCWCeOYGJ+LGkkpFEoOptFFZzBs0Ubl5OrpsA5kkPY3v5bbxlpXjL3mbvMw9jjSRQ5fOsfbOJdmbrCrxlpXi3LSWNDqYsZW9ZKd6yUrxlpWTG0XURt5I+NxJ/rpwiips5Vb2D3GICzJmRSjR+qreRXUAAc+Jc8l5+G29ZKd6yUrxvbeWNRVbMdLC5mmr+xmxdgbesFO+2paTRwZSl7C0rxVtWireslMw4uqe5guwnV1FCIHPiXHK3bMdTVoq3rBTPthzWPJCIGX9uchcvZ209XTYxfSnF9rfxlpXiLX4N2/xkzHTgzqe4hgBVlfm48DN5JgunxRPdm6+ZwqNImftr1s2NJ2XKTDKzVmDbuJWGxalEIHLlyZo9AdP4CZjGT8A0fgKm8RMwjZ+AafwETOMnYBo/AdP4CZjG51DCxeWyPUdmJYHiZrJ243Y8ZaV4y96mYeNSMuII4Nq8irXVBKiqzMeFn8kzWTgtnujefM0UHkXK3F+zbm48KVNmkpm1AtvGrTQsTiUCuZQMSPfUvMhzfyDAuH/7LrcQKGby/aQSaMf/fpV3uABavHgaPuTV3/2Q1FuW8A4dDPkZ373NRHd5WxoIUPsBtR46YSL1ybd5fdPTLH1wBqkJMUSYEJHTCsf6yHaqNq4mN2suGVPiiZ39c7KnWYgw8hUjEdGpZM6bRUd17v2cFEXKlGQCuPMp3NlMR54dRazBXyIZqRYC1L/F2gICmO9ZSf4iKwkRRr5kDCfBuoh1j6cSoPp58hzNdEncTNZu3I63rBRvWSnesg0stPClY61uAjj3UNdEJ4ykzM/BtnwRWdNSSbFEEWFERM5RnnM/XwtPZtkbWyl+ZgVrHppJWlwMCx97lDmWcEx8xRhO7OQf8ovZdLCPBg/nLjyZZW9spfiZFax5aCZpcTEsfOxR5ljCMfEVYzixk3/IL2bTwT4aPFx81dvILiBQ3MO8sfHnZMRFYerB30TEYJ29AtuTVsz4c5O9YRt1nJQwYSoJ+HG/zjvV+KnmHbubzrkprHJykpviEgcB7kkkwYgEK5eTKi4+T1E+WdUEMN+9lDeeeZi06HBM/I3JHM+cR1ayaV48gYr4j3wHXs7OPHs1ry2ykhBh5EvGSFLmLmXlNDpw07C/iYuvAtv6agLFk/W/V5I1OYaIHnzJFGEhfdFK8mZHcsWJTCbrmdfwlJXiLSvFW1ZK9qRwvhSezLI3tlL8zArWPDSTtLgYFj72KHMs4Zj4ijGc2Mk/5Bez6WAfDR4ugWo2rrMTKJ6sdX9kTXo80b35mx7hxCbPInf1UtIjCeBat57cerogmTU5K8lMjMLUgy9FWKxkL15IAh3UOPkUCWZ1b+azFn8WfjIpnkBRpNyZSKBqfvNmBafleYvcHDcB4h5m05NzSYky8jdGzIlzyX18Fmb8uVljd+DhYjKSNHE6ZvytJ6+4iY7K39xEIf6mYx0XTgDLLAoLNvPG75aSPXs6KaNS+cWSh7FGGfl
a7ygmpj/ML5LoYD8Hm7joPEX5ZFUTwHz3Ut545mHSosMx8TcmczxzHlnJpnnxBCriP/IdeDk78+zVvLbISkKEkS8ZI0mZu5SV0+jATcP+Jvwda3UTwLmHuiY6YSRlfg625YvImpZKiiWKCCMickZOtm21EyByJnkrF5FuCcdEOyMRFivZv1pKGv6q+U2BAy8nHWt1E8C5h7omOmEkZX4OtuWLyJqWSooliggjcokZkG6p/X9beBV/43hgyjhOMSqV+6cQqOFpXtnupTuW3BJCSEgIISEhhISEEBISQkhoL/oPvZG7//U53qGjIdz/2/mkmug2U99xBCh/gjvG3MgDi55jS3ktHi8icq6M4URbEkmb9jDZy3MoX5CIiVOZ+lxDR+VHjuEvetJ05uDPzR9KduLFXxPFRZsIkGwlZRgB6iqKKMSfhZ/MSCaCU0VPuYeF+HPzh8pazs5C1v+3kHRLOJ0J620hQHUO1hn3MGdlPnnVTjzNiMglYAqPIiExlTnzFpG7bjPLkoycykiv3nRQjbeZ82IKjyIhMZU58xaRu24zy5KMnMpIr950UI23mYuu5M3nKcdfJAvnzWJibzoVbX2UldMIVGSjsJ6TLPGkReKnGttfnHytvgpbNadVWF2Lh680VVFcQICM5HgiEDkTNzZbPoGS+cW/WImmM0YmPvQoyywEcOUUUdzMWSTyixmJmOgonITEVDpqONLMRVddQZ6bQNNmkRFn5FThWGc8jJUrS9oj/05mYiQmOmcKjyIhMZU58xaRu24zy5KMnMpIr950UI23mYuvsojfVBPAPHseC+OMdCrKSvbi6QQqItfh5KxmzyJ9GKeyxGOlg+pmjiHBy0lhQREBLFNJsXCK6EQrKQRybXqLwmY65akoZS2B0u6bzsTenMKUlMq8UYmk3TOX7CdX8saW12hYnEoEF5cp8Q5+YiHAGrsDD/6qKXy5mgCzrVgjOIUpMoaJyVYyFvwc26YVzBlGJ8IwhdPBfjxHuMjc2Gz5BErmF/9iJZrOGJn40KMssxDAlVNEcTNnkcgvZiRioqNwEhJT6ajhSDP+wnpbCFCdg3XGPcxZmU9etRNPMyJyrpwV2IoJkDBrOtYIThWVSsZsArjyK6nipLDeFgJU52CdcQ9zVuaTV+3E04xcQQxIN+xgy29fJUDCA6Qm0IkYUu9JJVADT2x7Gy8Xzy3/P3vwAxcFYfj//3WnHidFHp55SFFQ2qEGgnmDogyvWGftE+Q+Oiz3yOna1M8+P23ta9bP9uHzq1+ztjb8fDZ1czl9fKzx0fIHLZNiIf2hMJwiWEKZkCiKeongHzjB+0nu8g4BwVTE3s/n/7OGFyeGcS7i/+XfuJc29m7j5V//mO/H30BofwM33Pp9fvbLF1lTUkljMyJyPhxtoKqikKwVz5P29P9yVhYHrvsJUJtTTJGH0zzlFKwmQNp9txOJPw9V2/MJdA1hFtpnuoG4FALUVlRSy1nYx+OKMdGRuLGTcNGGu5KsFU+TPvl+whLHEP3wXOYsziG7oobGFkTkImtsqKGkMJfMX83mp6u4qBobaigpzCXzV7P56Sp6QA3bNrsJNB7XaBMdC+HWJBeBiikod3NaLOMmWfFXsKGUWk6pq6yggNNsM+ewwM5pq7dQwSmNHxezBH9OEu0hiHTKs4OSPAIlOkmOoBN2klOsBFpH0XbOIobhEbSrv8lET6irraGEQOlJo7DQgQgHaYlcQiYxNcnKuWpsqKGkMJfMX83mp6voEVWfFVNLoMkJozDTMUtsEukEKthcTi2dSxt6DWbaE4TZilxOKvJZUkiAOJeDONoR4SA9kUDuZeRu8tCequotBLKTGGWlfbHMW7WYrCdmMcOVREKkFYuJi8BO8vfsBHgtl9xaTqsoJqsCP1bm3TYKM93hoc5dyYb1OczL+D9k5HHxeXZQkkegRCfJEXTCTnKKlUDrKNrOWcQwPIJ29TeZOJu4sZNw0Ya7kqwVT5M++X7CEscQ/fB
c5izOIbuihsYWRKSLGneWk02gSNsg2mdiuN1JAHc55bV8LW7sJFy04a4ka8XTpE++n7DEMUQ/PJc5i3PIrqihsQXpQX2RrivJ56USAsQ/6CSe9kWNexAn+eTj59dryH/Syb0Wzq8wJ4/99vfMnzwcM+do6HR+v+pD7pr0IpW0r7JoDX8oWsMfngbChvPgrGeY/+8TGG5BRLqosbaCgs1bKChaz4ebi9lQTTeFkOycBK+t4mvuHIoqZpEcw1caNxWSib9U0hOtBGpgbw1t5DN1/Bim0kV5NVQ9BzY6kWgnmk5EpJL5XCmux3Ooon1VZfksKctnyVLAGkX6pJnMS3cSHYKInKNk6yDa56G2opj1m4speLeUgqJSqrhYPNRWFLN+czEF75ZSUFRKFT3tAHuLCJQSzXATnbKF3UBb5fsOAFZ8RsWlYmMZtfxTXimb57twhXgo2bwKf5NjJnFrcyZU8E+FfFgxiwQ7bClbR4AUJ+NsSG8WaiWSC+ygmyraiI8iks4NCR8F5HOam70HGoAQOmQ1E0T7bGE30BOajh6gregwKx0bRKQdKOLSkGgnOoQu8lBbUcz6zcUUvFtKQVEpVfS8WncxgZzERZnolHUQ0bSxvYY9gI2OmU1BtG8QkaOBPOQyUVK0jhL82UlPtNO+cJLvdkBRMf4y1xczLzEJC4GajlYQ6BrCLFxy4sZOwvXC0+Tik0/WhhrS7w+nVUnROkrwY/8haQ4TnWppoPyTYore30huSTEfFldSW8qcQQAAIABJREFUSw876KaKNuKjiKRzQ8JHAfmc5mbvgQYghA5ZzQTRPlvYDZxVRCqZz5XiejyHKtpXVZbPkrJ8liwFrFGkT5rJvHQn0SGIXJIyVmxkXgw97lBdDW1lP3kP5ifponyq9gE2TolIJfO5UlyP51BF+6rK8llSls+SpYA1ivRJM5mX7iQ6BLnIjEiXffi337CZQJvnjsZgMGAwGDAYDBgMBgwGAwaDAcOwH5NPW39gzfo6vrGw4ThTnDh/9Bi/f/kD9lS+zW8mD8fCNxM18c98UrmW30weThhnsXcbL//y+4wYfh8vlDQiIp2r257LvH+/H8v4h0h78nkyXytmQzXnxJI4jnlW/LhZXFTKKR6KNqwkwP1JJFvoGcFBmOlcZMpTlLy+kAWuKGychbuSrMVziZswm8wKDyJyNm727OQMYSFBBGqgPDeTtNTbuH7ybKY+v5LlRaVUcTE0UJ6bSVrqbVw/eTZTn1/J8qJSqri8lBxtwp95lIPJ+FtF7uYGoJyi1/HjJC7KhH3kJE6roGh7DVDBh/lu/LmSYrEhvZrpKixW2sinah+XpL1HPXRqdDhD6O1M9A/m0hESRH/OpoHy3EzSUm/j+smzmfr8SpYXlVLFZabCQxOdiw6zIt8GpeSurCBQBfMmj8E8egzm0WMwjx6DefQYzKPHYB49huhnijnD6nwK6ui9Im4nPYUAuXnFVNGqgoLcCvzFuRzE0YEWNwUr5hPnGkfcw3OZsXQV2cWV1HJ52XvUQ6dGhzOEbyYy5SlKXl/IAlcUNs7CXUnW4rnETZhNZoUHEbm4IlOeouT1hSxwRWHjLNyVZC2eS9yE2WRWeJCLy4h0TWM+axft5Xx48W/51NE1z3zoxev14vV68Xq9eL1evF4v3j2f8PZbb/P2st/wb5NvJczMeWOOvJfHXv6EPcf2sGnNn/m/Z97L8DA6tvcNfjF+PvmHEZEONJYtI3XSfDILawgQEUvaxGlkPruQd9a8Sd0fZ9ElplEkp1rxV7tqIxs4ybOF3BUEmOO6HQs9I9k6iK4whycx59nVfFH0JkUvPMW8iUlEW+mYu5B5P1tEwVFEpFMHqNpOG1air7FymocNS2cT9+RKcqsJEBnjZMYjc1n+38soyfuA3JmcZx42LJ1N3JMrya0mQGSMkxmPzGX5fy+jJO8DcmfSqyVbBxHANArXwwRYsrmcxopSst2clphEog0s9ljSOC27tJK66nJ
yK/DjIC0+HOntokj8Hmco+KyGLmsoZN6/Z7K8zE0jF1akJYRvhWYuHeFWBtAZDxuWzibuyZXkVhMgMsbJjEfmsvy/l1GS9wG5M+ndEq3YEIHG4vdZ7OY8yCF3cwO9lxWXK5UAhbkUVAMVxWRV4MfBDKed9tWQlfEgroW5lLvxYyXakcqc2U+R9ceXKH/nNZan0KtFWkK4GMzhScx5djVfFL1J0QtPMW9iEtFWOuYuZN7PFlFwFBG5yMzhScx5djVfFL1J0QtPMW9iEtFWOuYuZN7PFlFwFLmI+iJd0vjeG/y/ezk//vIirz45gelDubSZw4h/YDrxD0znmUXQuHcbmz/8gLV5a3h58RtU4mfvC7y47hc4J4YhIm14ilnw80VswE/MNLKf/zEum4kADXSRieSUHxK3LJMS/sn9v6wvm8aoI4Vk4sc6DddoE2cyYbHQxjTe2TSLBM4fS3AQ3WKyEjculbhxqWQ8AY3uSraUlrKuKJ+s1YVU4ce9kuWFPyQ5xYqIdKC6nNwK2hhP4lC+1lj8ZyYtLsVfwrSFrPpJEjYTAQ5xfjUW/5lJi0vxlzBtIat+koTNRIBD9IRBhCUCRZyWV862p13YTHSodnc5bUUPCiGQibj4SbBiFV97vYzsoeWUcFpcUjSRnGSLItEO2RWckl9Bbmw5BfixJxEXgfR6JuJiU4Ec/BWsL6ZqQiqRnF3dxnwyC3OgcCUzIpKYMXkSM1xJRFs4JdRKJG1srqSKWCLpWFX1FgLZCQs10ev0MdFWyW43xFhpXw3byjjPPBw7yrkJDsJMxxqL/8ykxaX4S5i2kFU/ScJmIsAheobN6gCKOS2fkkoP6TYTHaqtoYQ2IgZhQcRD0QfLqOX8WL4mn3njUonktKBgO1DBaflU7QNstKtk2URmb4zl1rEOkkfaiY+8BluIiYvBkuRiDjlk4lNM9uYakpsLKcFPkovkCNpV9dpzTF3r5jQr6fP/i8xUO5Y++HFTRA8ItRJJG5srqSKWSDpWVb2FQHbCQk1cVCYrceNSiRuXSsYT0OiuZEtpKeuK8slaXUgVftwrWV74Q5JTrIjImYKCB9HWvGUbyYjj/DBZiRuXSty4VDKegEZ3JVtKS1lXlE/W6kKq8ONeyfLCH5KcYkUuDiPSBXW88dcXOH/eYM17lfQ25rDh3PrAdJ5ZtJZP3nqMMAK9XF6JiJypcVMhC9wEmDPzx7hsJtpqPFJPl9kdpNvx42ZxUTEFG1biz5bqINFEO0KItjsIVMa2ai4pZmsUCeNSyXhiISWLpmAjUFbVbkSkYyX5qyggkG1aEokm/slD0QfLqMXfFP7zJ0nYTLTh4dhRziMPRR8soxZ/U/jPnyRhM9GGh2NH6QHhDI+3EmgduZs8dMzN+vX5BEoiLiqEtiyxY5iKH/fbZK7Mx1/yjVGcYudWp5Wvud8mc2U+/uK+5yAOuRxYxjiZYSVQ4SIy323grI6WkvmnHL5WXciS52dzz8piGvkn0w3EpRCoKJ+CajrmKWV9vpsA1iTiouh1bNdFE0eg7KIt1NGB6mKyizg322uopT2VlBRxAXgo+mAZtfibwn/+JAmbiTY8HDtKj4gc5sBGoL9u2EIjHavdnE82gVwjo7Ag33p175O1gvOnMJ+CagJE3+igrYLPamhfDSUbK9lQlEPm8/NJe3gi1985m+XVXBymUbimWfGXW7iKJX8vxt/UCU4iaU8NBbmFBEicRcYEO5Y+tNFEYwMXn+kG4lIIVJRPQTUd85SyPt9NAGsScVH0KLM1ioRxqWQ8sZCSRVOwESirajci0j5LlJ1kAhXtqOFCMVujSBiXSsYTCylZNAUbgbKqdiMXjxE5uwP5vPEXAqX8mR1eL16vF6/Xi9frxev14vV68Xq9eL1evF4vXq+Xg2um09Ybf82nkktLY91ethWt4cVf/4Iff/c2RszNp5H2ma8ewhAChfU1IyJnOlRXQ1t1x5o4Uw3Za9fRdXbSJjrwV7vqaea8jh8
7j6Y4MNO+yJFJxOGvmP9YU0gdF09jg5vysnyWr8hkxqxpxC0sppH2mUMHMYRAtj5BiEj76kqWMXthBYFieTTFgRmfBvbW0EYDdUc5U00+y1/nPGpgbw1tNFB3lDPV5LP8dXrEqLhUbPhzk7l4JRuO0q6q3N/xeB6BkpwkR3AmyyiSU/BTQUkFfiaRPNKEj91+F6dVUFKBHzvpY+zIZSIkialTYwnkZsnTc1lQ0kDHasj9fSYLKmjDzqMpDsz4WElMSiJQIf/xp1yqaI+HDSsyyagggC3VQaKJ3sceS5qVQK+tZEmZhzPVkPWnReRyjjaVs+0oZ6grXMfvKrgAGthbQxsN1B3lTDX5LH+dnmGP4UdWAtSuWExmmYd21eTy+Av5BEoiLT4ckbrNhSwnUPL812jctJHGTRtp3LSRxk0bady0kcZNG2nctJHGTRtp3LSRxk3rybqfNgrJKq7Bnzk6lqkEKli8ioKjnKGxLJclRQSyJxEXwUViIjnlh8ThJ28lmUX4ScUVH0L7DrC3iEAHG6jjTI1lufyliB5gJTEpiUCF/MefcqmiPR42rMgko4IAtlQHiSYuqMYGN+Vl+SxfkcmMWdOIW1hMI+0zhw5iCIFsfYIQkQ5EROOyE6Bg8SpyGzgnjQ1uysvyWb4ikxmzphG3sJhG2mcOHcQQAtn6BCEXjxE5q715L/Eige6d7CSKrrHccS/TaSPvD6wp4dKx/UW+HzqEEbd+nx/PfYEX8z5k268f4r6fv8zmvY18rbmRyqIX+dnMX7CZQA+NGY6IdM3yZ+ezoNhNI6081FUVkvn4DKauddMdkQ4XLvy4a6hyc5p9PMl2OmZ3MiOJALUrZpP6fA4baj34NNZVkv37aUQ/PJMZC1eyfH0p5e4GGvmGqnNIv/Me4h6ey4yFK1leVEr5ivmkvZBLidvD11o8VJXlMOfZTEoINHlEFCLix9NAbVUxWUvnkjhtERsIZHt4JjPsnEUOszOWUVDr4SueBsoLV5I+cz5ZbrrASlQkbeymvLKBrxx1U17TQPtymJ2xjIJaD1/xNFBeuJL0mfPJctMjzI40/jOJQGWLuHPyXDKLa2hs4ZS6SnJXzMX1ZC61+LOT8ZPxRNIeK+PGOelQSizxIXzNYo8ljQ5Y7+JWO3IZiZv4OBkxBHIXkzHtIdKX5rKhpoFG/snTQFVZDvN++iPSskppyzZxFlPtBIi8ewpzrASoXTufO3+aSVZFA42c0lhbyvIXZjNpcSmBnDw30YGZ3igW1xQ7gUrJ+Pn/YUFxDY0tfKWxppjMx2cwda2bLrFFEWclkHslD/8qh5I6TvG42fDa86T++0pquVhymJ2xjIJaD1/xNFBeuJL0mfPJctMltnAnbZVU1tDISZ4GqqrdNNINJgdTZyYRqJSMh/+V9BXFVB3llJYGygtXkj5zPlluAsTNnE56BPKt5yY3N4dASaQ7wumaEJLvTKWtgtX5lODHcjvp06wEcK/ENfN5sioaaKSVh7qKHObMX0QJgVyTncThxxZONG3s3MG2Or7S6K6kqo5zZ3eQbqdjD7twWei6ikxmP59LSR2nHK1hw2uZpP18ERvoAls40bSxcwfb6vhKo7uSqjq6JfLuKcyxEqB27Xzu/GkmWRUNNHJKY20py1+YzaTFpQRy8txEB2YuoOoc0u+8h7iH5zJj4UqWF5VSvmI+aS/kUuL28LUWD1VlOcx5NpMSAk0eEYWIdMRO2sQkArhXkvaz51le5qaRf/I0UL5+EXemTsOVkUnma/lsqHJT5+G06hzS77yHuIfnMmPhSpYXlVK+Yj5pL+RS4vbwtRYPVWU5zHk2kxICTR4RhVw8fZGzqGTt/6wh0L1MuCOKLhvk5N4fwYt/wc9mXsrbzGNx8VwShj7E/Kf+wBtPb+a0veT/7iFG/46zS3yGh+4wI3K5yn7yHsxP0i0ZKzYyLwZsUbHEkU8JftyFZPz0HjL4hiJuJz0FcvNoV9z3HMTRmXDSp09jSeE
ySjhtQ9bT3Jn1NO1ZXlbMclpZmZH5CpljQzhnEeN54pFV5C6t4DQ3BS/NJ/Elzi5mFumjTYh8G2U8PIYMuilmFqsecWDGn5XoWDvkVeCv9t1FuN5dxLkKCrICbk6rYMGscSzgn2Yuo/GRWKJj7ZBXgb/adxfhencRl45w0mfO4i+Fi9iAn+p85v00n3l0LmHm48yJMdER28gkksmngDPFxUZhw48tFlciZBdxBltqDKOQy4rJzrxfPU/VzLksr8ZPDdmL55O9mK6JmcWq2UlYaCPYwaNzXfz18VxqOa22eCVTJ6+kc1bSn51Duo1eKy51FjNWzmaJm9PchWT89H4yOFdRJKZaYZkbf7VrnyZx7dNcHFaiY+2QV4G/2ncX4Xp3Eeeqv8lEW9kZ92PJ4JSUZ/jiORdmui7ye7PIyCkkoww/NWQvnEn2QjoXM4uFD8ViRr71qt8nK49ASU6SI+gyS3wSU8lhOX4q1lFQMYU4O/9kInni46TnzCXLzWllq5g6eRWdiplFhiucQEGYrYCb0yqW4XIuwydjxUbmWThHdtImJzEvo5AzWZl32yjMdCSKuInAagJsyJpPYtZ8zk0QZivg5rSKZbicy/DJWLGReRa6LtjBo3Nd/PXxXGo5rbZ4JVMnr6RzVtKfnUO6jQsrYjxPPLKK3KUVnOam4KX5JL7E2cXMIn20CRHpWKRrOvNWF7KggtPKVjHj4VXMoB3VpRS8xldsExey5YkkLJwUMZ4nHllF7tIKTnNT8NJ8El/i7GJmkT7ahFw8RqRz2/N5eR2Bxk/AOZRusOD8l+m0tfm3b/Ahlwozt859kd/8SxjdFnYvv1n8GPFmRKQ99lQyJlrpkohUpt5nJUB5DVV0xIrLlUr7kpgx1s7ZmON+zMKfObDRPQkzf82CsSF8MyYSHv4lC8Za6TZrEguenEKcCRHpiphJZP9qGgnBnCHuvlnMsNIlkfenkm4lQMnOGtqKjknFRie211ALxN03ixlWuiTy/lTSrQQo2VnDxWAeMY2cRdNIoHsS0hey4pFYzHQiwkF6Iu2wkhZrJ1A4w+OttGfymFGYkctOuJMlKxYzz2HlnMRMIvtX00gIpl22lF/y5nwXNrrDiuux/2KJK5xezZLEgt/OIoGzs933DMsfoQtMJKf8kAS6JuE+F8mcf3H3zWKGlS6JvD+VdCsBSnbW0JYt2oGLTuTVUEU3mezMW7iYeTF0T8wksn81jYRgRKgqziWXQK4UB5F0g8WB637aqOB375YSwOZkyW9nkUA3WB1k/GIKcSbaiCIx1UpnSna7+SYiE8aTRjvsPyTNYaJjIbgmzCGBrrCSPCEVF/7yKd9DG1EkplrpTMluN91lS/klb853YaM7rLge+y+WuMK58EwkPPxLFoy10m3WJBY8OYU4EyLSmeBY5j05i2Qr3RMzi1Wzk7DgYyLh4V+yYKyVbrMmseDJKcSZkIvIiHRq8//3B/IJNOGH9xFF91juuJfptLH396xd38gl48p4HlvzAa8+6iSMrgkb9xgvrX+Vx+LMiEhHQnDNXsKS+8PpjG3sLN5Z8RQLUu4iwPpiSurokGWMkxm0I8lJcgRdYCJh2kLefHYKyVa6IJy0uS+R80gsZs6DYDtzXvgLWQ85sNE1NscUli/9NXPsJkTkbMJJm/k8JYvn4gqnfZYkFix9iqkRdMKK62fLKMqYQ7qTANnF5dQRyOxI47n7rHSoGY5xkiWJBUufYmoEnbDi+tkyijLmkO4kQHZxOXVcHJbEWbzz+vPMSQrnrKwO5jy7mjfnJhHJ2YQTl2TnTONJtHOGUTHjOdMUXKNNyGXK4iBj0cvkznYRbaWLwkmbvZgvls3FFU4nTERPeIYtK+eSPtTKWUU4WbDsFbIfsmOm9zPHTCNn0TSSrXTINnYOOU+4iOpL19insCJzEgl0xorrsZfImXk7Fi4ASxILlj7F1Ag6YcX1s2UUZcwh3UmA7OJy6mgjYjxPzIylY42cE4uDjGW
vkfVwEpGcjZXkh56hZPFcXOGInFRB9upiAjlJTwine0JIvjOVtmpXvU+BhwDmmGm8ueYppkZwdjGTWL50IfNiTJzJRPKER0m30rGWJr4RWxLp93OGOJeDOM7CPoWcRdNIttKJWGY8u4Ts+ZNIthMgd0sFgUwkT3iUdCsda2mi+0xET3iGLSvnkj7UyllFOFmw7BWyH7Jj5iIJtjPnhb+Q9ZADG11jc0xh+dJfM8duQkTOzhwzjeylzzDHYaUrIlPmUvT7aSQEEyjYzpwX/kLWQw5sdI3NMYXlS3/NHLsJubj6Ip3YTP7Lmwk0ge/fEUa3DXIyYSa8uBg/e3kx7wPmj3Ni5hLRN4oJv32bCU9u442/vcQb6z5k27ZPyN+6l1PCGD5uBLeNmYBzwveZkBiGGRE5q+Aopma8wvj71vG7tbnkFhZT7gasUSQnjWfq91NJi7Fi5iR7LGmsIhufHJbnTydtQjjtCnGQNs3KkmVu/KXddzuRdJWJaNcccp0/ZEP+Ov66vpDN5cVsqOaUiFiSo6NxjXMx2RmLzcT51SectMcWkza9ktx315FbWEZ55Q4Ktrs5xUq04wYSR4wj2XkXaTFWzIhIRyJjHERG2UlLdPKAMxabibMyR6ayZPXtpOf+D1lrC1lXXEktYBvqYHxKKj+630mCzUSr+HgnrM7na6/lkD3dydQI/IST/vQrxDlXk5m1jnXFldRyUkQsyfGxpKfEEsYp5shUlqy+nfTc/yFrbSHriiupBWxDHYxPSeVH9ztJsJloFR/vhNX5fO21HLKnO5kawcUR7mTBfzuZV1VM7jvryf6gmA+LK6nlJGsUyaMduMa5mOyMxWaiy+LGjCeOCkrwc18sw02cwTwslnQgCz8THcSZkMtZHyvJDz9DyZTHKS/KJ+vdQorKyykoq8EnMsZBXHQMrsS7GJ9kx2aiyywjJrF8VRrPVbzPutxCsjYVU1BWw1ciYkmOjyU9JZW0xCgsfbisWBJnkbtmPNlrVpOV/z7ZZTVAOAkptzP5/imkJ4VjoYHcA7QRTlgo7YocO5d38lxkrclhed77FGx3A1aiHaNIvs3JZJeTBJsJaiu5UMyRqSxZfTvpuf9D1tpC1hVXUgvYhjoYn5LKj+53kmAz0So+3gmr8/naazlkT3cyNQI/JhIeWcYXcav41Su55OaVUsVJ1iiSRztwjbsLO+eoTzhpsxeS9nAlBUWFZK8vpGBTMeVuTrIS7RhF8m1OJrucJNhMiHytopisCgKlOBlno9ssY5zMIIcl+HHnULDlxyQ7TPgzR6ayJMfJvMJ8luTlUvLJDgq2u2llG+pg+Ag76SmTSEsKx0Inwl0sX2MnLWslS/Lep2C7m1aRMQ6SR7tIix/ENxOCyzUFXlvJaQ5mOO10hSVxFrmr7yI7J4es/PfJLquhVWSME9f4VGa4koi28JVbnVaocONTsnIdBRPtJAdzWriL5WvspGWtZEne+xRsd9MqMsZB8mgXafGDOFeWEZNYviqN5yreZ11uIVmbiikoq+ErEbEkx8eSnpJKWmIUlj5cfH3CSXtsMWnTK8l9dx25hWWUV+6gYLubU6xEO24gccQ4kp13kRZjxYyIdIc50sWCPzp5tCyfv76eT255OQVlNZwSTkJiNPEJTia7nCTYTHSoTzhpjy0mbXolue+uI7ewjPLKHRRsd3OKlWjHDSSOGEey8y7SYqyYkZ5g8J6EiEgH3pz4AK1MV17Jbf/XHORS5KHg9/+Ca5mb01LJyn+KNAtyGSt49hlaBYUOJPlPLyJn9/EfF7Pr72/RKmbSD7AOHYaIXBo+XfcGNZs30Wr0vCe5+hYHl4O6inI2zH+CVldHRzNywr8ivZGbrMfvYWoefmbxzqZpJCDfZntLt1D++t9odcMD32fYg1OQM338x8Xs+vtbtIqZ9AOsQ4chIpeGT9e9Qc3mTbQaPe9Jrr7FweXo4z8uZtff36JVzKQfYB06DBG5NHy67g1qNm+i1eh5T3L
1LQ66oi8iItK7Va8jc5mbAPcnkWxBRERERNpVyoLUTAriY3FFRxM90k585DXYQky0y7OD8k0ESgknEhERERER6Ql9ERGRXqXRA2YTJ3moq8gn49mnySXQ1DsdWBARERGR9g0icmgpBa+VUvAap6XMpeiJNOIsJk7xUFdTTtafnmaBmwBxsVHYEBERERGRntAXERHpRUrJTJxGBp2wz+JHSSGIiIiISEfCSbwzCdYXEiDveRLznufskpgx1o6IiIiIiPQMIyIi0osMIiyRTliZMXMiCSZEREREpBORd08nI4ZzYCUt43GmRiAiIiIiIj3EiIiI9CJBmEPogBXXY//FgrEhiIiIiMhZBMcy7/cvkZkSTpdZo5iasYTl94cjIiIiIiI9py8iItKLWImOd5BQXsyGar5iG+pgfJKLtAlOXBEhiIiIiEgXhdiZ8dxrpM8sJreokIJ3K9i2u5gN1XzNNtTB8BF20hKdPOCMxWZCRERERER6WF9ERKRXiUtfzDvpiIiIiMh5Yol0kB7pID0dERERERHpBYyIiIiIiIiIiIiIiIiI9BJGRERERERERERERERERHoJIyIiIiIiIiIiIiIiIiK9hBERERERERERERERERGRXsKIiIiIiIiIiIiIiIiISC9hRERERERERERERERERKSXMCIiIiIiIiIiIiIiIiLSSxgRERERERERERERERER6SWMiIiIiIiIiIiIiIiIiPQSRkRERERERERERERERER6CSMiIiIiIiIiIiIiIiIivYQRERERERERERERERERkV7CiIiIiIiIiIiIiIiIiEgvYURERERERERERERERESklzAiIiIiIiIiIiIiIiIi0ksYEREREREREREREREREekljIiIiIiIiIiIiIiIiIj0EkZEREREREREREREREREegkjIiIiIiIiIiIiIiIiIr2EERGRzhgMiMglyOvlawakiwwGTvN6EZFLh9fr5TQDlw2Dga95EZHLmcGAtM9g4DSvFxG5dHi9Xk4zcLkyGDjN60VELh1er5fTDHSVERGRTvQNMtOqpcmDiFw6mj0efPqazUjX9Aky49Pc5EFELh0tHg8+fcxmLhd9gsz4NHuaEJHLS3NTEz59zGakfX2CzPg0N3kQkUtHi8eDTx+zmctVnyAzPs1NHkTk0tHi8eDTx2ymq4yIiHQiaGAorVqOezh+9CgicmloOnQIn6DQgUjXBIWG4tNUfwgRuXQ01R/CJ8gSyuUiKDQUn6b6ekTk8tJUX49PkCUUaV9QaCg+TfWHEJFLR1P9IXyCLKFcroJCQ/Fpqj+EiFw6muoP4RNkCaWrjIiIdOKK8GvwObxvHyJyaTi8rxafK8KvQbomOPwafA7v34eIXDoO799PK4PBQHB4OJcL01VXYbrqKloddbtpOX4cEbl8HN63D58rwsOR9gWHX4PP4f37EJFLx+H9+2llMBgIDg/nchUcfg0+h/fvQ0QuHYf376eVwWAgODycrjIiItKJq24cis+hnV8gIpeGuuqd+Fx1441I1wy44UZ8DlVXIyKXhvqa3bQ0NdHqqqHDMBgMXE6uunEYPod2foGIXD4OVVfjc9UNNyLtG3DDjfgcqq5GRC4N9TW7aWlqotVVQ4dhMBi4XA244UZ8DlVXIyKXhvqa3bQ0NdHqqqHDMBgMdJUREZFODLw5Bp8D2z9DRC4N7s+lgGZXAAAgAElEQVQ+w2fgyBika4IGDiQkMopWTfX1HKreiYj0PPenn+JjvTmGy401JgafA599hohcHg58WsGJ5uO0ssbEYuzXD2lf0MCBhERG0aqpvp5D1TsRkZ7n/vRTfKw3x3A5Cxo4kJDIKFo11ddzqHonItLz3J9+io/15hi6w4iISCdCo4fT/+rBtDq8dy+Hdu5ERHrW/m2f4Dl8mFaWm+wEh4UhXWf7TgI+e8vKEJGet3drGT6DHd/hcjN4jAOf2q1lnGhuRkR6v9qyMnwGO76DdM72nQR89paVISI
9b+/WMnwGO77D5c72nQR89paVISI9b+/WMnwGO75DdxgRETmL8DuT8dm1sRgR6Vm7NhbjEz42GemeIXfcic+eks0cO3gQEek5NZs20VRfT6sBw25iwLCbuNwEDwlnUPxoWrV4POz6aAMi0rs17Klhf0U5rQxGI0PuuBPp3JA77sRnT8lmjh08iIj0nJpNm2iqr6fVgGE3MWDYTVzuhtxxJz57SjZz7OBBRKTn1GzaRFN9Pa0GDLuJAcNuojuMiIicRcQ9LgxGI632l2/Dvf0zRKRn7N2yhUPV1bQKCg0l4rv3IN0THBZG+NhkfKreexcR6Rktx4/zReF7+FznGs/l6rp7xuPzReH7NDU0ICK9V9V77+Fznete+l15JdK54LAwwscm41P13ruISM9oOX6cLwrfw+c613i+DYLDwggfm4xP1XvvIiI9o+X4cb4ofA+f61zj6S4jIiJnEWQJJeqB7+Pz+d//Tsvx44jIxdV46BDb387D54YHvg8GA9J9UWkT8KndWkbt1jJE5OLbnvcWTQ0NtLLcZCd8bDKXq6tvGcPV8bfQquX4cbb/PQ8R6Z12b9yIe/tntDKaTESlPYB0TVTaBHxqt5ZRu7UMEbn4tue9RVNDA60sN9kJH5vMt0VU2gR8areWUbu1DBG5+LbnvUVTQwOtLDfZCR+bTHcZERHpgmHpD3LltRG0Ovqlm4rX/4aIXFwVa/9Gc2MjrQaOGMl14+9Dzs2VERHcOPEH+FSsXUvDnhpE5OLZVfwRe0o24zPswSlc7oY9+BA++7d9whfvv4eI9C4Hqyr57K1cfG56cApBoQORrrkyIoIbJ/4An4q1a2nYU4OIXDy7ij9iT8lmfIY9OIVvkysjIrhx4g/wqVi7loY9NYjIxbOr+CP2lGzGZ9iDUzgXRkREumj49Efw2bftEz5dtxYRuTg+XvMqB6uqaGXs24/o6Y8g38zQSelYY2JpdaKlma2vvsLhfbWIyIW3p6SE7Xlv4TN0UjoDR97M5S4kMoroqdPwqXz3HXZ9tAER6R0OVVfz8auv4GNLSOT6+/4F6Z6hk9KxxsTS6kRLM1tffYXD+2oRkQtvT0kJ2/PewmfopHQGjryZb5uhk9KxxsTS6kRLM1tffYXD+2oRkQtvT0kJ2/PewmfopHQGjryZc9En4yRERLqg/2AbpgEDOLDpH7Rq2LuXowcOMHDoUIx9+iAi55/ncANbX30V9/bP8Imd/SjWm2ORb27gzTHsK/6I5iNHaGlqYv+2cq4cPJj+AwciIhfGzg8K2Z73Fj5D7hjL8Gk/5tvCcpMdz6FD1H++nVZf7tiB98QJQiMjEZFL1/5t29j6yipajh+nVUhkFPGPP4mxXz+k+wbeHMO+4o9oPnKElqYm9m8r58rBg+k/cCAicmHs/KCQ7Xlv4TPkjrEMn/Zjvq0G3hzDvuKPaD5yhJamJvZvK+fKwYPpP3AgInJh7PygkO15b+Ez5I6xDJ/2Y85Vn4yTEBHpogFDh9HHZMJdVkqrIwf2c6CiHLPFQvBAKyJy/uz7+GO2vvoKR/bV4jPypzMJv3Mccn70DQ5m4IiR7N+0keZjxzjRfJzaj7fi9XoJvT4SETl/jh08yKdvrGX3PzbiY0tIZNSjv+Db5upbxnBs3z4avqii1aHqndTv3s2Vg22YrrgCEbl0nGhpYUf+22x/Ow+v10urK6+NYPTjTxBksSDnpm9wMANHjGT/po00HzvGiebj1H68Fa/XS+j1kYjI+XPs4EE+fWMtu/+xER9bQiKjHv0F32Z9g4MZOGIk+zdtpPnYMU40H6f24614vV5Cr49ERM6fYwcP8ukba9n9j4342BISGfXoL/gmDN6TEBHppp1vrmPbn/+Ev6ujh3PtGAcDrrsOETl3X37+ObuKP+LLHZ/jYzAaifn3OQy5/Q7k/DtSs5vShb+lfscOfPqHhhKRkMiQ+NEYDAZE5Nw0HjpEzT82srPoQ/xd67ybkTP/jW+z8uUv8sXa1/F
3zRgH19wyhmCrFRHpOSeOH6dm8yaqNxTR1NCAT+iIkYya/XOCBg5EvrkjNbspXfhb6nfswKd/aCgRCYkMiR+NwWBARM5N46FD1PxjIzuLPsTftc67GTnz35BTjtTspnThb6nfsQOf/qGhRCQkMiR+NAaDARE5N42HDlHzj43sLPoQf9c672bkzH/jmzJ4T0JE5Bx8ubWMbS8u5fCuavxdGRbGoKHDGHDd9Vw5eDD9goMRkY55Dh/mcG0tdTu/4MBnn3L0wAH8WW6yM3z6I1x1w43IheNtaeGTpUvY9fbf8dc3KIhB0dGERkZx1ZBw+g8ciIh07ETzcY4cOED9rl18+fnnuD/fTlvRU6dz/X3fQ2DX23mU/+VFWpqa8BcaFYX1xqFcde21XDHoavqYTIjIheP1ejn2pZv6mhoOVlayv3wbJ5qb8Xf9vd8j+kfTkfPL29LCJ0uXsOvtv+Ovb1AQg6KjCY2M4qoh4fQfOBAR6diJ5uMcOXCA+l27+PLzz3F/vp22oqdO5/r7vocE8ra08MnSJex6++/46xsUxKDoaEIjo7hqSDj9Bw5ERDp2ovk4Rw4coH7XLr78/HPcn2+nreip07n+vu9xPhi8JyEi8g18lvUylWtewev10p4+/frRJygIEWnDC81NjZxobqY9ffsHc8OEfyUq7QHk4tn30QY+f2UV9ZU7aJfBQL/+/TEYjYhIoBPNzTQ3NtKRsNuSGDopnSuuuRY5rfHAfrb/71/ZXbCejvQNMmPs1xcROf+8J05w/OhROmKxR3PjxB8waFQccuHs+2gDn7+yivrKHbTLYKBf//4YjEZEJNCJ5maaGxvpSNhtSQydlM4V11yLdGzfRxv4/JVV1FfuoF0GA/3698dgNCIigU40N9Pc2EhHwm5LYuikdK645lrOF4P3JEREvqGmuoNUv5lLzTsFHNu/DxE5d1dccy3XJI8j4p7x9O3fH+kZewrfY3f+27hLtyAi587Yty9D7hjLtXd/F8tNdqRj9ZU72JX3Fnvee5fmxmOISM+6evQYrr3rbgZ/JwG5ePYUvsfu/Ldxl25BRM6dsW9fhtwxlmvv/i6Wm+xI1+0pfI/d+W/jLt2CiJw7Y9++DLljLNfe/V0sN9k53wzekxAROY8Olm/jy61l1H++nSM1u2n68iDNTY3g9SIifgwG+vbvj3mglSuuuYYBQ4cx8OZYBgwdilw6ju3fj3tLCXWflnN4506O7d/P8SOH8ba0ICKBjP36YRowgGBbGCFRUYQOH8HV8bdg7NcP6Qavl/2b/sHBTz6mvnIHR/fuoenQIU54PIjI+Wfo04d+V4bQf/BgQq67Hkt0NINGxRMUGor0nGP79+PeUkLdp+Uc3rmTY/v3c/zIYbwtLYhIIGO/fpgGDCDYFkZIVBShw0dwdfwtGPv1Q87dsf37cW8poe7Tcg7v3Mmx/fs5fuQw3pYWRCSQsV8/TAMGEGwLIyQqitDhI7g6/haM/fpxoRi8JyEiIiIiIiIiIiIiIiLSCxgRERERERERERERERER6SWMiIiIiIiIiIiIiIiIiPQSRkRERERERERERERERER6CSMiIiIiIiIiIiIiIiIivYQRERERERERERERERERkV7CiIiIiIiIiIiIiIiIiEgvYURERERERERERERERESklzAiIiIiIiIiIiIiIiIi0ksYEREREREREREREREREekljIiIiIiIiIiIiIiIiIj0EkZEREREREREREREREREegkjIiIiIiIiIiIiIiIiIr2EEREREREREREREREREZFewoiIiIiIiIiIiIiIiIhIL2FEREREREREREREREREpJcwIiIiIiIiIiIiIiIiItJLGBERERERERERERERERHpJYyIiIiIiIiIiIiIiIiI9BJGRERERERERERERERERHoJIyIiIiIiIiIiIiIiIiK9hBERERERERERERERERGRXsKIiIiIiIiIiIiIiIiISC9hRERERERERERERERERKS
XMCIiIiIiIiIiIiIiIiLSSxgRERERERERERERERER6SWMiIiIiIiIiIiIiIiIiPQSRkRERERERERERERERER6CSMiIiIiIiIiIiIiIiIivYQRERERERERERERERERkV7CiIiIiIiIiIiIiIiIiEgvYURERERERERERERERESklzAiIiIiIiIiIiIiIiIi0ksYEREREREREREREREREekljIiIiIiIiIiIiIiIiIj0EkZEREREREREREREREREegkjIiIiIiIiIiIiIiIiIr1EX0RELhCv18u+g8eoP+Kh6XgLIiIiIiIiIiIiInJ56dvHyJX9+2EdYKZ/UF8uhr6IiJxHVXvrKfv8AJ/urOOL2nq8XkRERERERERERETkW2BwaH+GXmNh5A1WYm8cxIVi8J6EiMg39NG2Wt4r2U3V3npERERERERERERE5NttwBUmkmLDcd4SQVC/PpxPBu9JiIico8+q6/hb4Q4q99TTlqlfH6yWKwgJDsLUrw8iIiIiIiIiIiIicnlpbjnB0WMevqw/xuGjTbR1Rf9+3HdrFHeMCud8MXhPQkTkHLzxYRXriqrwFxIcRHTUYKKutRJmDUFEREREREREREREvh0OHW6kquZLPv1iP3v21+Nv1NCreTDlJoLN/fimDN6TEBHpphXrtrGxvBafK4ODcNwcQczQIYiIiIiIiIiIiIjIt9vOvXVs/LiaXbV1+NgGBjPtvpGED7qCb8LgPQkRkW7489+2smX7AXxuHhrGHaNvoF/fPoiIiIiIiIiIiIiI+GypqOGdf3yOz4Arg5j1QCzhg67gXBm8JyEi0kUv51Xw4dY9+Iy95Ubi7OGIiIiIiIiIiIiIiLSnZt8h1hWWc+SYh1a2gcH8/AfxBJv7cS6MiIh0UcHmXXy4dQ8+dyUMI84ejoiIiIiIiIiIiIhIR8IHDyBt3M1c0d9Eq9ovj/Jy3qecKyMiIl1Q++VRXi3Yjs9tcZGMvDEMEREREREREREREZGzsVquYHxSND5btu/nvS01nAsjIiJd8MaHVfjcGDGIMSMiEBERERERERERERHpqvDBA7jzlhvxWfthJU3HW+guIyIiZ1G5p55Nn+7D5/b4KEREREREREREREREumuUPZxrbRZaHTl2nPx/VNNdRkREzqKwrAafW0Zcy4ArzYiIiIiIiIiIiIiInIsxIyPwKSytobuMiIh0ornlBP8o34dPzLBwRERERERERERERETO1XVhFoZcfRWtDv3/7MELYBUEofDx/44HNgR5yehMRzHAyQYaA41NVGQGBlxSwKVCehO0FM0SiGtJVCZmKOhXidcQrAwweWkkXFFHqLD5gE2BYYBOBeUIKPOBbDg9n0uPewhjGwM8+f/9du/l+Rd3Uh8BJKkW64vfovzDj6jQ8bi2tGwejyRJkiRJkiRJByP1a4lErX/pTeojgCTV4sXXSoj62nFtkCRJkiRJkiTpYHU8ri1Rm18roT4CSFIttm5/j6ikdi2RJEmSJEmSJOlgtWqRQIuj46mwfdce9pSVU1cBJKkWO9/eQ1TbVkcjSZIkSZIkSVJjaNuyGVFvvl1KXQWQpFq8+/4HVIhvGiR4VABJkiRJkiRJkhrD0c2aEvXeng+oqwCSVIvyDz+iQvCoAJIkSZIkSZIkNZbgUQGiyj/8iLoKIEmSJEmSJEmSJMWIAJIkSZIkSZIkSVKMCCBJkiRJkiRJkiTFiACSJEmSJEmSJElSjAggSZIkSZIkSZIkxYgAkiRJkiRJkiRJUowIIEmSJEmSJEmSJMWIAJIkSZIkSZIkSVKMCCBJkiRJkiRJkiTFiACSJEmSJEmSJElSjAggSZIkSZIkSZIkxYgAkiRJkiRJkiRJUowIIEmSJEmSJEmSJMWIAJIkSZIkSZIkSVKMCCBJkiRJkiRJkiTFiACSJEmSJEmSJElSjAggSZIkSZIkSZIkxYgAkiRJkiRJkiRJUowIIEmSJEmSJEmSJMWIAJIkSZIkSZIkSVK
MCCBJkiRJkiRJkiTFiACSJEmSJEmSJElSjAggSZIkSZIkSZIkxYgAkiRJkiRJkiRJUowIIEmSJEmSJEmSJMWIAJIkSZIkSZIkSVKMCCBJkiRJkiRJkiTFiACSJEmSJEmSJElSjAigBivdWsDCeyZy9XfO5rQT4oiLiyMuLo64pHTOHnAZ438/h9wXSpAkSZKkqgqmBUkNBUkNBUkNBUkNBUkNBUkNBUkNXcOKtzmAElZcFyQ1FCQ1FCQ1FCQ1FCQ1FCQ1FCQ1dBMFfPHsWJRDaihIaihIaihIaijI1YvCHA4F04KkhoKkhoKkhoKkhnJYvI2YUTAtSGooSGooSGooSGooSGooSGooSGroGla8zQGUsOK6IKmhIKmhIKmhIKmhIKmhIKmhIKmhmyjgi2fHohxSQ0FSQ0FSQ0FSQ0GuXhTmUNqxKIfUUJDUUJDUUJDUUJA7VyNJkqQvkACqv3AuU7/TiWYdejJ81GTumJdL3mYqhTeQ+8hMpl4zkrPT2tBp0HjmrCtBkiRJkg5sOnmFpdSqrIC8PyF9ajp5haXUqqyAvD8hSZIk/UcIoHopnnsZnZLOZvy8YuqqeOlURp7Uk8vmFiNJkiRJB7J4dSG1WpfHYqRKi1cXUqt1eSxGkiRJ+s8QQHVUyoYZIzltxEyKaYhiZo44jZFzi5EkSZKk2uxYsooi9q/o2YXsQKq0Y8kqiti/omcXsgNJkiTpP0MA1Unpkzcy8vtzCFNDl2HcuqSIbe9GiEQiRCIR9ux6iVWzxpEdooYwc8ZOZOFWJEmSJGn/1uXx4hb2o5j1uYVI1azL48Ut7Ecx63MLkSRJkv5TBNCBleYx9YeTKaC6lEtn89KGBYwbmEaoBZ9JaJ1C1qW38tjax7g+k+rCc7hxbgGSJEmSVFVm335UWsSaDSXs07Z8VqygUt9+ZKIvo8y+/ai0iDUbStinbfmsWEGlvv3IRJIkSYpdAXRAxXNvZGIh1WXeyoJZI0gJsn/tsrnxzlvJorqCaQvILaV2pWHy5k7l6u+czWknxBEXF0dcXBydss7msgkzWfJCCQeUP5m4uDji4uKIi4sjLm44c7YCLy9h8qizSU+KIy4uifTsy5i8aAMl7ENJMUvuGc9l2ekkxcURFxdHXFI6Z3/naqbOzSNcjiRJkqRG0LJ3PwZQaXZ+AWV83jtr81hGpZHZA2lJA5SFKVg0jV9d3p8LsoKkhoKkhoJkD+7P9TfMYsXmEuqnhK250/nV5aeTHQqSGgrS56z+XD9tLgXbqYd87gwFSQ0FSQ0FSQ0FSb18LjvYtx2LckgNBUkNBUkNBUkNBbl6UZjGsGP1XGZdl8MFWUFSQ0FSQ0GyB/fn+htmsaK4hCOtZe9+DKDS7PwCyvi8d9bmsYxKI7MH0pIGKAtTsGgav7q8PxdkBUkNBUkNBcke3J/rb5jFis0l1E8JW3On86vLTyc7FCQ1FKTPWf25ftpcCrbTYO8UL2XeDd/nkrOSSQ0FSQ0F6XNWf66+bhqLV4eRJElS7AugAygmd+4Sqgtx/U1XkUEd9BjN+CvTyM65iut/N5uHCl5i16YbyU5gP0oo+P1I0lOSOG3EeO6Yl0veZj5TnJ/LzFsuY3BaG5Kyx7PwZern5TmMzBrMxHty2RDmY2E2LJ/JxGFXs2AzVZSyYe54zk7rxOBRU5m5fANhPhXeQO68Oxg/4jSS0oYz9ckSJEmSJB2klAx6dqfS/DyKqKmUovzpVOpHetck6qeEopkXM/DUZC64cgKzFy+noJjPbF29nHnTv8/lp7ejz/AJLNvCgW1Zys3Du5M94hpmL85nK5/Y8cJy5k25mAtOPoHr79vAO8SIt/KZdfkJ9Bl8MTf/aREFxXxm6+rlzJv+fS7P6s4lv5jLi7s5clIy6NmdSvPzKKKmUoryp1OpH+ldk6ifEopmXszAU5O54MoJzF68nIJiPrN19XLmTf8+l5/ejj7DJ7BsCwe2ZSk3D+9O9ohrmL04n618YscLy5k
35WIuOPkErr9vA+9QD7s3sPgX/RmYNYTrp88i/4UwUTteWM6yP01g3OBksi+fRsFbSJIkKYYFUO1ezmPJI9QwkuysBOqmNcOmF/HY/X/gxh+OYFCPFFq3YD+KWTiqJz2vmcOGMAcUXj6V4VlnMzm/lLop5tYfjmROmM/rMYieXfhUKXm/GUz2iKnkhqnd5oWMPyONkXOLkSRJknQwupIxgErbF1KwjhoKKZhPpfb96JZKPRSz7Menct71c3lxOwe0Y+U0rh7cnztXl7JfW+YybvAQZq0Ms3/FzPtxf352dzFfeFsWcf3g07l5cTG1C5N/18UM/M5NFOzmCOlKxgAqbV9IwTpqKKRgPpXa96NbKvVQzLIfn8p518/lxe0c0I6V07h6cH/uXF3Kfm2Zy7jBQ5i1Msz+FTPvx/352d3F1MnufO68pD/j7lrODmq3dfEELjjrYhZvQZIkSTEqgGpVumkNC6nh4tNIT6CRlZL365EMv6eYegnnMnHoaOa8TB0UUFDIPmVckk0GnyhZPJ5hP8slTF2FmTNiJFMLkSRJktRgzejcYwyVCvlnYTHVrFvFw9updH4WnamrUgqmXczV9xVTL9uXc9ull7N4C/tQzLz/uZjF26mDMAWrC/liK2TWFTnMK6buVk/i6l8v5R2OhGZ07jGGSoX8s7CYatat4uHtVDo/i87UVSkF0y7m6vuKqZfty7nt0stZvIV9KGbe/1zM4u3UQZiC1YUcWAkrfn0+t60MU2fb5zLuimkUIUmSpFgUQLXaFi7mc05IIkTjKs2fylWT8qguRPbPF1C0Yw+RSIRIZA/b8u5mXL8Q1YTnMHLMTIppqAxG9svg30rzuGPSHYSpKsSgmx7jpXcjRCIRIu++xGM3DSJEVXmM/91CSpAkSZLUUC3TshhApfwV+eyg0tbCpRRRacBJacRTN2Wrp/GrKflUFyJz7DyWFr3HxnA5G8PvsfKhPzKqT4hqts9l3P/MYivVla28h9tzqSFE5th5LC0qZ2O4nI2v7OSBO8eS2Z4vvB2LJnPzaqrrNYapj+5kbbicjeH3ePbRexnZi2p2/OnnzF/HEdEyLYsBVMpfkc8OKm0tXEoRlQaclEY8dVO2ehq/mpJPdSEyx85jadF7bAyXszH8Hisf+iOj+oSoZvtcxv3PLLZSXdnKe7g9lxpCZI6dx9KicjaGy9n4yk4euHMsme2pk7LV07ntT2GqaT+QaxdsYu3WcjaGy1n7zCNcOyBENasnMHtJCZIkSYo9AVSr8MsLOfRKWPLHiRRQVYgRc1bx2A3DSGuXwCcSCGWO5ta/L+TGTKpbOpGZy0upk8yrmF2wi0gkQiQSIRJZw7ge/FvpEwuYWEg1WdOW8NBPs0lpwSdapJD90wUs/HkG1dwzkwWbkSRJktQA3b4agg5p9OxOpcV5FL3Np8IUrFhOpX707RGCoxKI50BKWHHvJIqoKsSQO5/gLxOG0rltAp9IILHXKK77y3yu7UV1uZOYt7KUSqUUPHYTO6guY8J8ZkwYSue2fCK+NelDpzDjnhvI4IusmH/et4hq2o9hxr2/Y0j31sRTIYGW3S/iF/97LwOoqpCZf19OGYdXt6+GoEMaPbtTaXEeRW/zqTAFK5ZTqR99e4TgqATiOZASVtw7iSKqCjHkzif4y4ShdG6bwCcSSOw1iuv+Mp9re1Fd7iTmrSylUikFj93EDqrLmDCfGROG0rktn4hvTfrQKcy45wYyOJBSCh6aRBFVZXLdnMVc2SeF+CD/Ft+hH1feOZ8ru1PNvL8uZCuSJEmKNQF05G1dwux7qG7gjdx4UQr71CKL6++8lQyqCjN56SpKOZAMbrztVkb0aM2+FDwxlepGM/7iDD4vgaxLriKbqpaQWxBGkiRJUkN1Jf1MqphOXmEp//Z2AWsWU6l9P7qkAO2T6MIBbFvK3++juuwbuHZoCvvUPJMrfzuFdKoKc+djeZQR9QIF86lhDGN+kEk8nxffawyjL+SLa0s
+K1ZQTfoVo+jbls/rMJTvjqGaHffl8SJHQlfSz6SK6eQVlvJvbxewZjGV2vejSwrQPokuHMC2pfz9PqrLvoFrh6awT80zufK3U0inqjB3PpZHGVEvUDCfGsYw5geZxPN58b3GMPpCDqCQvOlUd+E4zu/O5zXPJOd7/agmdzlF25EkSVKMCaAjrnTDGhZSXfbQbFKoRY9shoeo7t48NnAAPUYyKDOBfSumKJ8aUkhqx751SSeb6ua8UIwkSZKkhkqgc8ZFVLV4dSEVygqXM5sqBmXQmbop21jAMqrLHNSPZGrRPZtz2lPd/Dxe5FPbi9m8nepy+pHenP1oTXqfoXxRlb1UwDKqS04KsW8JdD5pKNVsL+DFbRwBCXTOuIiqFq8upEJZ4XJmU8WgDDpTN2UbC1hGdZmD+pFMLbpnc057qpufx4t8ansxm7dTXU4/0puzH61J7zOUWhUXUUANX02iJfuWnNaP6uby4hYkSZIUYwKoVqFQNodayc5iaso+KYXaJZFyBtWFt7GthNr1zyCN/QkTfoQaJnJaXBxxcXHExcURFxdHXFwccXFxxMWdxkRqWFtMGEmSJEkNldi9H5lU2rFkFUVA0eo5VDWgV1daUjfvvFVMTVlpKdQuRHJvqnRsbu8AACAASURBVNseZsfbfOLDUsqoISWJRPYvsX0KX1TvvFVMTcuuTCY1FCQ1FCQ1FCQ1FCQ1FCQ1FKTPlYuobhFbX+eISOzej0wq7ViyiiKgaPUcqhrQqystqZt33iqmpqy0FGoXIrk31W0Ps+NtPvFhKWXUkJJEIvuX2D6FWr0VJp8appxOaihIaihIaihIaihIaihIaihI6uBJ1LT+1TCSJEmKLQFUq2atQ3zO2mLCfBFto+Q9andMMxKQJEmS9IWVkk5WeyqtW8r64kIKloSp1IOeaSkcftt4530aLL5Fa3QIpKST1Z5K65ayvriQgiVhKvWgZ1oKh9823nmfBotv0RpJkiSppgCqVSgjm2xqmLeGolLqrOD3gxl5yxI27OQQSyHUjlplh0JIkiRJ+iLrSvogqljOikVLWLOOKgaR0Z0jIIXEtjRceSk6FLqSPogqlrNi0RLWrKOKQWR05whIIbEtDVdeiiRJklRTANWuS08G9aCG2Sx5opS6KSB31hLmTBhMemIS6SMmMyc/TCmVWrdLoabctcXUrpjiJ6iuRxJtEqhV6xbN2L8EEnpQ3dDZvBSJEIlEiEQiRCIRIpEIkUiESCRCJBIhEokQiUSIRCJE7h9BCEmSJEn104P4eD7Vms69hlLVsimTWEYV38uiM3XXsm0KNeVtKKZ2L7P1KarrHqJlPJ84ujWJ1LCumK3s39ZXN9DYynaX0BjimydR05UPlrMxXM7GcDkbw+VsDJezMVzOxnA5G8PlbAyXszFczsZwORvD5VzZi8OoB/HxfKo1nXsNpaplUyaxjCq+l0Vn6q5l2xRqyttQTO1eZutTVNc9RMt4PnF0axKpYV0xW9m/ra9uoFbxCaRT3YDfb2JjuJyN4XI2hsvZGC5nY7icjeFyNobL2RguZ2O4nI3hcjaGy/nD0BCSJEmKLQF0ABkMGpVFdWGmTrqDAg6s+N4bGV/Ip8JsmDuRkVmjmb2ZzySk9WQY1eUuyqWY/SvNz2VBmGpCAzNI42CkkdGf6hatobgUSZIkSYdUConH8pnktCzS2b/M7l1pSdRXaNeXWsWnZjCA6vKXLGcr+1e2OpeHt1NNYnYGnflUqyQ6dKe6Jcspeov9KKFo5SIaZHExW9mXEl5ct5zG0PKEHmRSXcGmYr64Ukg8ls8kp2WRzv5ldu9KS6K+Qru+1Co+NYMBVJe/ZDlb2b+y1bk8vJ1qErMz6MynWiXRoTvVLVlO0VvsRwlFKxdRqxMyyKS6ZRtepgxJkiT9JwugA0q7aCJXhagufzzDR82huJz9Ki2cytUTFvI5l45meBcqJWcxaCDVLZ3IxLnF7NN7eUy9diIFVBVidP/TSOBgJNA
zazTVTWXqPRuQJEmSdBidkEEm+xMi66QUKjUjoSW1S8qkbzbV5U7itkXF7NPufGZNmkQRVYU4v28W8UT1IGNQiOpmMfNP+ZTxeWVPTWP6fTRQHpuL+bzNc/jrn2gcKT05qzvV5E+5kxVvExtOyCCT/QmRdVIKlZqR0JLaJWXSN5vqcidx26Ji9ml3PrMmTaKIqkKc3zeLeKJ6kDEoRHWzmPmnfMr4vLKnpjH9PmoXn0HPC6lu+jTmb0aSJEn/wQLowNoNYuLvRhCiuuJ7RnLagMu4Y+kGwu/xmdLwBpbcMpz0jPEsCVNDFrdeM4zWVJXCyGvHEaKqMHNGnMbZY+dQsLOUT5QSzp/J+G8PY2I+1eVM5ep+CRys1v1HMC5ENUvGZDP4N0vYsJPPlL6cxx2XdCI9ezhXT7qDOUsLKC4pRZIkSVIjiE+jZw77MYKMrtRTCt/+wVgSqSrM4ivP4JJfzKXorVI+UcqO1bO4+ZLzuW011Q25he/2SaCqjAFjyaC6ginnc/mURbz4Fp8oK6Fo0TVccu5NFFEXHemcQw1Luf7qSazYVsq/lZfwYu5NXD7sGlbQWHow4HsDqWb7NC4fcQ3zVocp41NlJby4ZBIXZJ3OJT+ewKz7FlGwOcw7ZXxOwbQgqaEgqaEgqaEgqaEcFm/j0IhPo2cO+zGCjK7UUwrf/sFYEqkqzOIrz+CSX8yl6K1SPlHKjtWzuPmS87ltNdUNuYXv9kmgqowBY8mguoIp53P5lEW8+BafKCuhaNE1XHLuTRRxIK3pO3QsiVS1lF8NG8KduRt4p5xPlJeydfV0xmWdxCWXX8NtM+eyYl0x7+xGkiRJMSiI6iSUM5OFN4UZ9rNcwlQKL5/J1ctncjV1EWLEnNmM68HnJPQfzx0Xz2b4vWEqhcm9bSQ9b6N2oRHMnjKCEI2gRTZXTRnG1EsWUinMkp8NZsnP2IdiNixfyB1UyOLWglWM64EkSZKkgxIivXc/mLecz8nJonM89Rbfdyy/yJnD1fPCVAqTf9fFnHcXtWt/EVMnXUQiNXQfw7VjpnHJ9DCVwuRPy2HgNBooRHqfgTBvKdWsvonLM27iUEoeej1X/mkpd66j0urpXD94OtezD8X55N/HvyV+bzFLbx5IS46UEOm9+8G85XxOThad46m3+L5j+UXOHK6eF6ZSmPy7Lua8u6hd+4uYOukiEqmh+xiuHTONS6aHqRQmf1oOA6fRIPF9r+S6IdMYt5hK25dy24il3MY+FG8gf/F0/q3XFB54aCzpSJIkKZYEUB0lkPXTh8idNZoUGiLEoGlLmHlRCvsWYtj0XO6+OES9hAZx69KZjOhIo0m5+FYWXJpC/YQYMWc243ogSZIkqREkp/Ujkc/L7N2DRBoixICbH2FyToh6aT+Q6+bMYEgH9iGBzHHzubYXdZKYcxEDOLDkAWMY2Z46Scy5iAE0kuaZjPntDWS2p3563cAffj6QlhxZyWn9SOTzMnv3IJGGCDHg5keYnBOiXtoP5Lo5MxjSgX1IIHPcfK7tRZ0k5lzEAA4khSGT5pGTQv20v4ip/zuWdCRJkhRrAqgeEki79G7WrJ3NuIEp1FWo3zhmr93AQ9dmkEAtWqQx+i8bWPO7EaSFOKCUnFtZtfYhxvVIoHGlMGzWGlZNGUYKdRDKZtzCVcy+KAVJkiRJjaRLBgOoKURWWgoN1jyNnN+v44HJF9G5PQeUPGQKf/vnYkZ1T2C/mmdy5f3P8oshKdQmccAUZtx8Fd2og7YDuW7OFPq2p1YZo+fxt9uuohuNJ77Xz5ix8F5G9QlRF8lDfscDc35GRnOOvC4ZDKCmEFlpKTRY8zRyfr+OByZfROf2HFDykCn87Z+LGdU9gf1qnsmV9z/LL4akUJvEAVOYcfNVdKMOOgxl8kNPct2QFOoisc9Y/vDQvQzpgCRJkmJQANVb6+4juHXJS+zZsorZvxvH6P7ZpIWoFEoju/9oxv1uNo9t2MW23FsZ0b01ddOajB/
Opqh4G2sW3s24S7PJ6sJnUjKzGf2Tu3lowy5eun8cWe04RFqT9ZMFvLSjiIdmjWN0/2zSQnwm1D2b7EvHcfeSInZteYxbh6YgSZIkqaHSSE6iulZd6TmIGoaR3oWD1Jr00fey9JmtPDDrj4y6sB8ZKXwmuVc/csb8kRlP7iR3xlgy2nJgzXswcsYmVj50L9de2I/O7flESiaZF97A1AWbWPqXsaQ3p87iu49lxhObmDF5DAN6pRCV3KsfOWP+yIwnd/K3yUNJDtLo4rtcxHULNrPyoXu57ntDyeyVQqUUMvoOZeSke/lbwXvkzhhDeiuOgDSSk6iuVVd6DqKGYaR34SC1Jn30vSx9ZisPzPojoy7sR0YKn0nu1Y+cMX9kxpM7yZ0xloy2HFjzHoycsYmVD93LtRf2o3N7PpGSSeaFNzB1wSaW/mUs6c2pu7aZjJqxiWefXMzkMaPI7JNGIlEhOvfpR86YPzLjyZ2sXDCFAR2QJElSjIqLfAxJ2o8f3vZPKjRv1pTRQ3sjSZIkqZ5W30Tq4EnAUKYWzGNIEpIkSZI+lvv0JtZtDlPhB+eeRPdOx1IXASRJkiRJ0iGz49UC/q19Bp2TkCRJknSQAkiSJEmSpEOkkMV3L6JC4oVZdEaSJEnSwQogSZIkSZIaXdm25dx5yX9x82o+NpTrLu5HPJIkSZIOVhBJkiRJktTISljx2/7ctgxo349rZ8xgSAckSZIkNYIgkiRJkiSpkbWm78U30LcsgdG/GktmeyRJkiQ1kiCSJEmSJKnRxff6GTN6IUmSJKmRBZAkSZIkSZIkSZJiRABJkiRJkiRJkiQpRgSQJEmSJEmSJEmSYkQASZIkSZIkSZIkKUYEkCRJkiRJkiRJkmJEAEmSJEmSJEmSJClGBJAkSZIkSZIkSZJiRABJkiRJkiRJkiQpRgSQJEmSJEmSJEmSYkQASZIkSZIkSZIkKUYEkCRJkiRJkiRJkmJEAEmSJEmSJEmSJClGBJAkSZIkSZIkSZJiRABJkiRJkiRJkiQpRgSQJEmSJEmSJEmSYkQASZIkSZIkSZIkKUYEkCRJkiRJkiRJkmJEAEmSJEmSJEmSJClGBJAkSZIkSZIkSZJiRABJkiRJkiRJkiQpRgSQJEmSJEmSJEmSYkQASapFIBBHhY8+iiBJkiRJkiRJUmP56KMIUYFAHHUVQJJqcXR8kAqlez9AkiRJkiRJkqTGUlpWTtTR8UHqKoAk1aJtywQqRCJQ8u4eJEmSJEmSJElqDCXv7SGqzTHx1FUASarFce2aE/XGm+8hSZIkSZIkSdLBKvugnLfefp8KLY5uQqsW8dRVAEmqRUpSK6K2hHchSZIkSZIkSdLB2rKthKhOSa2ojwCSVIuuX2tD1OYtbxKJRJAkSZIkSZIk6WBs2rKTqLSObamPAJJUi7YtEzjxq22osPeDctZtDiNJkiRJkiRJUkO9/V4pm17ZQVSPExKpjwCSdABZ3ZOIKnjhNSRJkiRJkiRJaqg1RVuJyuwWokWzJtRHAEk6gF4ntie5fQsqlLy7h/znX0GSJEmSJEmSpPraEi5h7eZtRJ2VkUx9BZCkOhjYuyNRT697lVde34UkSZIkSZIkSXX1QfmHrHj2RaL69jie4xNbUF8BJKkOTu7SjqzuSUQty/sXO0t2I0mSJEmSJElSXfzfyhd46533qZDYuhnnntGZhgggSXV0wdmpdGh/DBX2lH3A4n+u540330WSJEmSJEmSpP2JRCI89MQGil97i6gLv3kiTYIBGiIu8jEkqY7eeOt9/rDgOUreK6PCUUcFOLv3CXTt2B5JkiRJkiRJkqrauWs3uU9vIvzmu0SNHNCVzG4hGiou8jEkqR5e2/EeMxav4823S4nqmtKe3t2/SqtjmiFJkiRJkiRJ0jPrt5D33MtUddE3T+S0k5I4GHGRjyFJ9bTr3TL++vALbNyyi6q6dQ6R1qk9xyW2QpIkSZIkSZL05fL2u3v41ys7WLtpG7v37CW
qRbMmjBhwIid1asfBiot8DElqoCV5L7M0/2VqanF0PMcltqRd6+Yc0zyepk2CSJIkSZIkSZL+s5R/+BHv79nLW++8T3jnu+zY9R41ZaQmMuzMLrQ+Jp7GEBf5GJJ0ELa9uZtHn3mVpze8gSRJkiRJkiRJFTod14qzT+nAyZ3b0ZjiIh9DkhrBjpI9PLPhDZ5/cSev7XgPSZIkSZIkSdKXS6sW8XRPOZZeJ7bnhA6tORTiIh9DkhrZ2++V8er2d9m+aw/v7N5L2d4PkSRJkiRJkiT9ZwkeFUeLo5vSrlUCxye2IOnY5hxqcZGPIUmSJEmSJEmSJMWAAJIkSZIkSZIkSVKMCCBJkiRJkiRJkiTFiACSJEmSJEmSJElSjAggSZIkSZIkSZIkxYgAkiRJkiRJkiRJUowIIEmSJEmSJEmSJMWIAJIkSZIkSZIkSVKMCCBJkiRJkiRJkiTFiACSJEmSJEmSJElSjAggSZIkSZIkSZIkxYgAkiRJkiRJkiRJUowIIEmSJEmSJEmSJMWIAJIkSZIkSZIkSVKMCCBJkiRJkiRJkiTFiACSJEmSJEmSJElSjAggSZIkSZIkSZIkxYgAkiRJkiRJkiRJUowIIEmSJEmSJEmSJMWIAJIkSZIkSZIkSVKMCCBJkiRJkiRJkiTFiACSJEmSJEmSJElSjAggSZIkSZIkSZIkxYgAkiRJkiRJkiRJUowIIEmSJEmSJEmSJMWIIJJ0iHz00Uds2bKVnW++yZ49eyCCpC+xuLg4jj66Ge0S29EhORnFntdf38b2HTvYvXs3kY8iSNIRFwfNmjWj3bHH0qFDMoFAAMWO11/fxvYdO9i9ezeRjyJI+hKLg4SEBNq2bUOH5GSaNGmCvth27NzJG+E3ePfd9/jwww+R9OXWpGkT2rRuzXHHHUeLFs05HIJIUiNat76Ix594kmdXr6GoaAORSARJqqlJkyZ0S0/n1FN70ffMMzihS2f0xfPqli38c8XjPP30s6xbX8SePXuQpC+quLg40tPTOKVXT84843S6d0tHXyyvbtnCP1c8ztNPP8u69UXs2bMHSdqXE1NPoFevnpxxeh96ZvRAR15JSQm5y1eQ/9TTPL92Lbt2lSBJ+/LVDh3IyPg6fU7Lou+ZZ3CoxEU+hiQdpCVLH2b+wkWsX1+EJNVXr149yRk+jH5nnYmOvPynnmbe/AU8uTIPSYpV3bqlc/6woQwaeA46svKfepp58xfw5Mo8JKm+unTuxPBhQxk29Fx0+G3ctJm/3T+Pfzy0FEmqr3btjmXoeecy4qILOLpZMxpTXORjSFIDrV5TwJ3/+0fWrltPTQnNmnPc1zrR9tj2JDQ7GuLikPTlFYl8xJ73d/PWjjd47ZUX+WBvGTX1/sapjLnyB3Q9MRUdflu2bmX6nXeRu3wFNQUCAZI7nsCx7UMc3fwY4gIBJOmIi0Qo3fM+b725nddfeYnSPbup6aTu3bjyiu/Tq2cGOry2bN3K9DvvInf5CmoKBAIkdzyBY9uHOLr5McQFAkj6MotQVrqHkjd38PqWl9n97tvU1LlTJ674wWWcecbp6NArK9vLH6bfyf3zFrAvSR06khg6nubHtOKoo4JI+nL7YG8Z7+x6k/Brr7Lrze3U1KpVK35w+WiGDzuPxhIX+RiS1AAzZt7D3TPvoao27b5C7zMHcPKpfeh4QjqStD+bip7j+aefIH/Fw+x+922quvZHP+TCC3LQ4fOPh5bw21umsXfvXqKaNo2n91nfokfvMznxpF4EAgEk6Yvs5U1FPP/MSp56fBm7dr5BVZeNvpTLR1+KDo9/PLSE394yjb179xLVtGk8vc/6Fj16n8mJJ/UiEAggSfuy9eXNrH12JU8/8QhvvPYqVV14QQ7X/uiH6NB5dvUabp5yK1u2bKWqXqdlk5F1FmlfP4VmR7dAkvZl5xuvs25NPs+ufIyXXlhLVWf1PZPrfzqBli1bcrDiIh9Dkupp0i9/zcPLHiGqddtEBp5/CWc
MOBdJqo/IRx/x2D/uZ8m8P1O6ZzdROecPY/zYH6ND764ZM5l1z5/5TFwcg3O+x9lDLiCh2dFIUix6YtmDLJ3/F0re2kHUOQP6c8Mvf44OrbtmzGTWPX/mM3FxDM75HmcPuYCEZkcjSfXx9OPL+L8F9xJ+7RWiTsvK5DeTbyAhIQE1rn88tJRfT/4NVZ0x4Fy++e0LSQwdjyTVxwvPP8vDC//Kv9atIapjx69x069/RefOnTgYcZGPIUn18D8/ncg/VzxO1OnfHML5l/6QpvEJSFJDvb3rTebf83tWr8ol6twh/8XPfjoBHTp/uON/uXf2HKLSe3yDnFE/4ivHdUCSYt3eslLm3/N7nnx0MVFn9T2T3/7mRnRo/OGO/+Xe2XOISu/xDXJG/YivHNcBSToYC/8ynUf/fh9RPTN68P9uu5WmTZuixvHg3//BTTdPISqU/DUuGP1jTjypF5J0MJYvmc+8Wb8jKjGxHf9v2q107tyJhjrqlx9Dkupo8m9+y8PLHiUq59Jr+PaIyzkqGESSDkZCs6PpmXUWFTatL6TCvzZuZG9ZGd849RTU+P46ey4zZt5DVL/B53Ppj35Oi2NaIUn/CY4KBjnplD4c3fwYigqfosLLr7zC9u3bOfOM01Hj+uvsucyYeQ9R/Qafz6U/+jktjmmFJB2stK+fSruvJPHc009QYVs4zIsvvcSAb56NDt4TT67k57+4gaiTTunDD6+/ha8c/zUk6WClnJDOiSf1pKjwGcpK3+f9999ndUEB5/T/JvHx8TTEUb/8GJJUB/fdP48//+WvRH33ygmcec55SFJjSu2WQdOm8bzw/LNUeO75tXy1QzJdOndGjeepp5/hV7+eTNSA80Yw/L+vQpL+E6WkptPm2ESef3YlFf61cRPHHNOC7t26ocbx1NPP8KtfTyZqwHkjGP7fVyFJjSm5YxeO/1pnVq/KpcIrr7zKh+XlnHJKL9Rw27dv59pxEygtLaNCj95ncsX/3ESwSRMkqbG0TQyR1uNUnn/6ScpK36ek5G22bNlK/29m0xBH/fJjSNIBvPzKK4z7yXVEnTvy+/QbdD6SdCh07noSpXvep3jjeioUFD7PuUMGEx8fjxrH+Ak/paSkhAqZZ32LCy67Fkn6T9ahUyrBJk3419rVVMjPf5r+38ymdevW6OCNn/BTSkpKqJB51re44LJrkaRDIZT8NVq3TWTtsyupUPjc85zSqydJoRBqmMm/+S0bNvyLCh1PSOfqibcSFxeHJDW2lq3a0PGEruQtX0qFl195hbZt2pCe1pX6CiBJdTDj7llE9ejdl3OGfhdJOpSG//dVdEk7mQolJSXMuPse1Dhm3fNniotfpsJXjv8qI6+YgCR9GZwz9Lv06N2XqBl3z0IHb9Y9f6a4+GUqfOX4rzLyiglI0qHU55v/xRkDziXqjzNmooZZ8fgTPJb7T6JG/GA8gUAASTpUuqR9nZxR1xB114yZvL9nD/UVQJIOYO269Tz62HKihl58BZJ0OJz33SuIun/+Al5//XV0cPaUlvLXOXOJOu+7V3BUMIgkfVkMvfgKoh59bDlr161HDbentJS/zplL1HnfvYKjgkEk6VAbevGVHNOqNRXWFBSy4vEnUP3NnnMfUd++6DKSO3ZBkg61foPO58TuPanw9ttvM2fu36ivAJJ0AA88+Hei+p83gsTQ8UjS4dDpxO5knvUtoh548B/o4Dz44GJ2736fCukZvfn6qacjSV8miaHj6X/eCKIeePDvqOEefHAxu3e/T4X0jN58/dTTkaTDIaHZ0Qw4byRRix74O6qf1WsKeO75tVRo1bYd3xp+CZJ0uJwz7LtELXrgQeorgCTVYu/evTy87FGi+p5zHpJ0OJ15znlEPfzII+jgLHvkUaLOHHAukvRl1Pec84h6eNmj7N27FzXMskceJerMAeciSYfTmd8aSnxCMyrk5T/Ftm1hVHePPPIYUWf0PxdJOpy6nnwKnbqeRIWdO99kxeNPUB8
BJKkWq/Ly+eCDD6jQvWcmbRNDSNLh1PGEdDqkpFIhHH6DdevWo4bZti3M+qINVGjVph0nn3o6kvRl1DYxRPeemVT44IMPWJWXj+pv27Yw64s2UKFVm3acfOrpSNLh1KRJU3r1ySZqZV4eqrsnV64i6tQzvokkHW6n9DmbqJWr8qiPAJJUi8LC54lKz8hEko6Ebhm9iSp87nnUMIXPPUdUesY3kKQvs/SMTKIKC59H9Vf43HNEpWd8A0k6Erpl9CaqsPA5VDcvFRezY+dOKiR3PIHE0PFI0uHWvWcmUQUFz1EfASSpFhs3bSKq04ndkKQjIeXEbkRt3LQJNczGTZuJ6pTaDUn6Mut0YjeiNm7ahOpv46bNRHVK7YYkHQmdTuxO1MaNm1DdbNq0maiU1HQk6Uho95XjaHNseyq8umUL7723m7oKIEm12Praa0QlJXdEko6EpOSORG197XXUMK+99jpRoeSOSNKXWVJyR6K2vvYaqr/XXnudqFByRyTpSGjVph3NW7SkwtbXXkd189prrxMVSv4aknSkhI7/KlGvv/46dRVAkmqxa1cJFY5ufgxNmsYjSUdCyzbHErVr1y7UMLt27SKqVZtjkaQvsyZN4zm6+TFU2LWrBNXfrl27iGrV5lgk6Uhp2aYtFT788EPefucddGC7SkqIatXmWCTpSGnZ5liidpWUUFcBJKkWe/fupUKTpk2RpCOladN4ovbu/QA1zN69HxDVpGk8kvRl16RpUyrs3bsX1d/evR8Q1aRpPJJ0pDRpGk/UB3v3ogPbu3cvUU2axiNJR0qTpvFEfbD3A+oqgCRJkiRJkiRJkhQjAkiSJEmSJEmSJEkxIoAkSZIkSZIkSZIUIwJIkiRJkiRJkiRJMSKAJEmSJEmSJEmSFCMCSJIkSZIkSZIkSTEigCRJkiRJkiRJkhQjAkiSJEmSJEmSJEkxIoAkSZIkSZIkSZIUIwJIkiRJkiRJkiRJMSKAJEmSJEmSJEmSFCMCSJIkSZIkSZIkSTEigCRJkiRJkiRJkhQjAkiSJEmSJEmSJEkxIoAkSZIkSZIkSZIUIwJIkiRJkiRJkiRJMSKAJEmSJEmSJEmSFCMCSJIkSZIkSZIkSTEigCRJkiRJkiRJkhQjAkiSJEmSJEmSJEkxIoAkSZIkSZIkSZIUIwJIkiRJkiRJkiRJMSKAJEmSJEmSJEmSFCMCSJIkSZIkSZIkSTEigCRJkiRJkiRJkhQjAkiSJEmSJEmSJEkxIoAkSZIkSZIkSZIUIwJIkiRJkiRJkiRJMSLIkbJ1DsM7jGQh9REirV86Se3SGDR0GMMGZpPSmpiU9+s4TptENTfmRbg+ExFmzneSGDmPKoYxe8sCRiQjNVAes44/jek0kp+s4tkfZ6HDLY9Zx5/GdA7GMG58dgHfSuIzz98ex6hbqGIYNz67gG8lIelLKcz//SCJif9g37rdyuxl4ziR2hTw1wE9uX09+/Zfs3n4rhEcS6Xnb49j1C1UMYwbn13At5L4YlkzmVOGTKSan6zi2R9nUZt3N+ey8vElPPNIAc+/kEvxdj6VwslnZtC15yD6DB1Ony6tduJviQAAIABJREFUkaQvo+dvj2PULdTZsV2z6dS+NSk9B9Fn6HD6dGnNkVFC8QM38kzKrXzn63x5bJvDhFNGkksVP1nFsz/OQlLdPX97HKNuoc6O7ZpNp/atSek5iD5Dh9OnS2u+iN58YDjnXLWQqrLv2MaU80I0XB6zjj+N6VR1I7Neu56TqfTmA8M556qFVDVmcYRRPTlk3nxgOOdctZA665jFqV9txnHdhnHqoOFk9wzRlC+gNZM5ZchEqvnJKp79cRYNl8es409jOlXdyKzXrudkKr35wHDOuWohVY1ZHGFUT/SpADElzIblueTOu4PxI86mU9rZjF9UjCRJknTYrF/CCy9Tu5fXsHI9qrA9l7/+oBP9+p7NxJ9P5cHHcyneThXFPP/4Qu6//TJ+1LcN5+SM5/82lyJJqt2
bL+TyzOMLuf/2y/hR3zZ8e+xMindzWL37whxuz0kj56qpvPchknTIvflCLs88vpD7b7+MH/Vtw7fHzqR4N4o1L+fxzOO5PHjn1UwcksSQnIk88xZSvQSIZeFcpg47jZFzi5EkSZIOj1xWFoapzZuFS3gG7V0/lR+dcza3/6OYunpz1VQm9s1m1ppSJEl19/rfLuOKq+7gX2Uceu9s4P9+eTbnnz3y/7MHB1BNHobC9/9P3x6Ix+ph4uULZL6v8bQfIh1taFhL2Os9ezhdFbV2UOsKrr0DWofQ1Qq6dcbe3RpvW5CW3hq8Ww39ViGdWlOtiGwen57XOxJdsqSjpcDm8WHXBnK4xnLKPEauO/neVit5QoCouGn7/H40OwOoVCrV30v/rnJ+WGml9zyqm1jQuYWKx6voGEClitst3PQC2ItrsH+MSqVSqVQqlUr1NyEd9zLMeIboOe7gK++six3P1NAxyFVw0VhWRvspVCqVSnUFgoereOkXPka4vjqbFmB+XSKISqVS/f0FD1fx0i98jKC6qXmtPP+igyAqVXxu4UayooWBcJhwOEw4HCYcDhMOhwmHw4TDYcLDn3BS2krh7URxsHWPD5VKpRpfLqX+MB5/GI8/jMcfxuMP4/GH8fjD/NpaSLQ1B8J4/GE8/jAefxiPP4zHH8bjD+NZm4vqRmGhyR/G4w/j8Yfx+MN4/GE8/jAefxiPP4zHH8bjD+Pxh/H497IoFZVKpbo2bzqRGcd5L+43uWJZa8N4/GE8/jAefxiPfy+LUrlp9bdaaOpCKaWAUquTd7vP4fGH8fjDeP44wJ6dFvJSUBq080qLxAgqlUr11bXmQBiPP4zHH8bjD+Pxh/H4wzi7T9L0cjU5KYzR+aKV355BpVKpblprDoTx+MN4/GE8/jAefxiPP4yz+yRNL1eTk8IYnS9a+e0ZVDcQ0TqAxx/G4w/j8Yfx+MN4/GGc3QPs2bkVcS5jBN+2cKgLlSout3AzuS0J/beradlpwYCSz9VNAJVKpVKpVCqV6vrIWSgyqo3OLmLrcnGIUTkLRb56ZDoPt6FUiOXdg6x5KJe0mRoum65FL27k1Xd2kIdS8DUH7k9RqVQqVZSEmXqyVm7lVZuFLKLZ8Px+CJVKpfqySZipJ2vlVl61Wcgimg3P74dQ3fgSZmrRi9XU/qqFRSlE8XHI6UOlisct3IQ094kUcWVCH7uw11VR9J18FqQKCIKAIAgIwjxM38mnfIMV+7EA8RtCPmSjpjSf/G+kIggCgiCQ+o188ktrsB3qZugC10x+q4RUQUAQBARBQBAEBMHElmMhxhM4Zqd+TRGmOwQEQUAQBObl5lO+wUbbiSEm49osIAgCgiAgCALCI3YCXBQ4Zqd+TRGmOwQEQUAQ5mF6pIr6d3wMXSA+QzJtr1VRlDsPQRAQhFQWiOVsectF4AJX7kIA3zs2akrzyf9GKoIgIAgCgiAg3GEi/zvl1Lxmx/VxCJXquvhUpmNXDZtXLOABnYBRJ2A0LKBidRXN+1wELzCxATsbdAJGnYBRJ2DUCTR5uehTmY5dNWxesYAHdAJGncAD+flstthwD4SY3BD9TjvNzxZRsWweRp2AUSdg1AkYDQuoeLSI2jobHSeGuCJnuunYVcPmR/NZYRAw6gSMulRWrCiidrudzoEQk+lsEDDqBIw6AaNOwLjaThDol7awecUCHtAJGA0LqFi3BalniBvR8Ik29lvKqVixgAd0AkadgNGwgIrVVTTvcxE8zzgCtK8WMOoEjDoBo07A+KiNfmLrfS0Vo07AqBMw6gSMulSavMQ0fLgKo07AqBMw6gSMumyau1CpVFPgG/eKJPMFHx3vy8TS+/u9BPmCgTxRJB6dDQJGnYBRJ2DUCRh1RbQPoDRgZ4NOwKgTMOoEjDqBJi8XfSrTsauGzSsW8IBOwKgTeCA/n80WG+6BEPEKeu00rctnhUHAqBMw5pnY8KyVjr4h4heg/xBK9xeQNYfxzS1hVQVRrPy
hhzE6GwSMOgGjTsCoEzCuthPk/7owhCxZqV2dzwqDgFEnYMwzUbFuC+3eAFcmRNBrp/nZIiqWzcOoEzDqBIx5JirW1bBf6mb4AhMbsLNBJ2DUCRh1AkadQJMX+NTH/meLKM0TMOoEHlxWRG2TRPACMQW9dpqfLaI0T8CoEzDqBB5cls9mi42OviFUKtVXV0J2NRUVjHGkRwZk9n9fwKgTMOoEjDoBoy6f/X1MrKueEp2AUSdg1AkYdQIVLTLgokknYNQJlNYxRuMyAaNOwKgTMK62E2Qc5wN07qundnU+pXkCRp2AUSfw4LJ8NltsdJwYYlLeLRh1AkadgFEnYNQV0T4AnGqjaV0+KwwCRl0qK1aU03Som2FiGaJfstGwLp+K/FSMOgGjTuCB/Hwq1tWwX+pm+AJXKUTQaaV2tYkHdQJGncAD+flsbrDTOYhKpbpGCdnVVFQwxpEemdhCBL12mp8tomLFAh7QCRh1AkadgDHPRMWj5TQ02ekcJE5D9EtWalebeFAnYNQJPJCfz+YGO52DXL1BF+0N5VTkp2LUCRh18yhdXcVuSWaY62u4r439lnIq8lMx6gSMOoEH8vPZ8Gw97d4A18WcYtbUiETr/VAmSCwhgl47zc8WUbFiAQ/oBIw6AaNOwJhnouLRchqa7HQOEreg107TunxWGASMOgFjnokNz1rp6Bviqg26aG8opyI/FaNOwKibR+nqKnZLMsNcZ5/KdOyqYfOKBTygEzDqBIyGBVSsrqJ5n4vgBb5UbuUmdY4os5PQEMsQ0nNFlGyWCBCLjOuwjOuwhK2uCvOKbextqsRwG+MK9dioWlaO7QRjBD6UCHwoIb1RT/ntZew4sI2y+RquRujYFkqK7QSIpKXY3sLG+zSMcdpF/ZoSavbIRJOPSdiOSdjqzIjP1LPt+WIybiNu54Z8WJ8somqPjJKMa48V1x4rNfdVcvCtbRTMZVzygRrKn6xHChAhQPd7Nszv2TA/V8aOA9VMIz5DHitlj1bhOEFsJ1xIJ1xIh23U/0iL+IyNHS8XoEelmgoh5H1mav+lHvcgSoPduFu7cbdaaagrZO0rNlZ9M4kr0X+ohs0/rcc9iEKwR2J/j8T+7WbyftZG3RMGEojhlIOGdZU0OwPENNiNe7Ab91EHuxvKSVu6jbqXK0mfzvguyEgvVtGwvY1+ogWQnQ5kp4PdFkhbuo26ukrSZxI3/74S1lfaCXLJYDfuXWbcA1refauMNG4Qpxw0/LSGZklmjMFu3K3duFutNKBH3LyXTaUGZhBJS87iQmh1cNnRNjoHykhLJYpMjzOAUgB3t0xpth6lED3HrSikFJGViUqlmgKaTAP5wG4uch91ESzRk0wkmR7JxyiR9HSuu/5DNWz+aT3uQRSCPRL7eyT2bzeT97M26p4wkMA4zvrYva6I2lYZhT4XUp8L6U0LOWtbqP1H4qAhIRPoYtRhFz1nykibxTg0zL+3ErZbidTXHwC0TOb8qTZq1yxhtxelPhfuPhfuXWZ2l+7F8s+FpN3KhIb/YOWldRbaewKM0efC3efCvauezSkiq/51B2sX64nbkIumEhONXi7r9zrY7XWQcPs51i7UcNkZF83PltDQKhOt3yux3yuxf7uZnCfq2bC+GP10VCrVV46GeXcWAg4iBf8aAvR8q6gQJAejJNo7ZJbP1TOeXmcLvUQqYFGeHghwbYbobarE/JodeZAx+r0S+70S+7eXk2yq5scvb0WcQ/xO2TGvLqF9kEsCyE4bjU6Z5I4jLJ/LZSMnbLz0eDn7+xgj2CMR7JFw76pn89wyNv1yG8tv1xC3sz52ryuitlUmUrBHYn+PxP46M8t/foRNS/WoVKqrpWHenYWAg0jBv4YY44xE4+oSmpwBYupz4e5z4T5qo3mTGXHzXiylBhIYx6k2GtaV0ewMECnYI7G/R2J/nZnlLx9kVSJXIERvUwnrNznoJ5JMZ6uVzlYrNtNGLD9fwpQ72017XRWvvC4RRCn
YIyH1SEhv1tC4dCuWF6rJmsWUSssQAQmFUIgxzkg0ri6hyRkgpj4X7j4X7qM2mjeZETfvxVJqIIFxnPWxe10Rta0yCn0upD4X0psWcta2UPuPXIEQvU0lrN/koJ9IMp2tVjpbrdhMG7H8fAlTL4S8z0ztv9TjHkRpsBt3azfuVisNdYWsfcXGqm8m8WVwCzeh0OG92FAqu99EEtFCuDYXkL9ZIkB85D1VFKxxECC20LEtiBnl2E4wuRM2yr9dhr2PK9dnp+y7ZlxE0lJsd9LyqJ4x+hyU55qo2SMzsQDSKyUsuH8Lrr8Qn6GDmBdnU7VHZkLHrCxZY0MmNvmtEkwP1iMFGN8JG+XfLmPbn5hUyLOFgpwqHCeIUwDplSWUbHYRQqW6ViE6X1vCDyvrcQ8ysT4HDd/NwLxPJl49vyzhB+X1uAeZQICOnxXQeDTEGAMOzA8W0ewMEK/+1ip+9FM7/YzjrIumR01s2N5GP5Prb62iZHE50iniI2/lpUo7QcZKF7NJ48Yw4t1CxYNFNEsyk5ORNmVTss5BP0rJdxeQQyQHnR8OMcaAi46jjOH+nY8g0Xx8sBeF5JW5zEelUk2J5AyyFjOq1UnPpygNuOg4yqiHTcy7jeuq55cl/KC8HvcgEwjQ8bMCGo+GiOmsi6bvZVPbKjO+AO6GfJ7efJDJZZC+kCg2XlpdQ3vXEOOZcf82PP4wHn8Yjz+Mxx+m9iEtk/r0II1rlrDby4Q6m4r4QbWdfsbX31pOSUEV7T0BJjUo0VxuouI1FyPE58iLlTR6iaGSnLs1XHbKweZlJhpaZSYWwP16CSu+t4XOs6hUKpVC8sISlqPkbpXoZzw+3Ht8KCwt4VtzuUYy0rpsSjbZkQeZVNBZz4YH82nyhoiPzE5zCe2DjJVZwPy5XDbi3cIP/7Gc/X1Mrs/G5hVltJ8iPkGJpu9lU9sqMz6Z/atLaPpDCJVKdZ2dddH0eD5NzgDxkZE2FfB8a4CYTtkxP7iEZmeA8cnsXyey2SYTnxCdDSIlmxz0M76gcwsVj1fzW6bQWRdN/yRifl0iyMT6W2sozS+h/RR/e2ddND2eT5MzQHxkpE0FPN8aIKazLpq+l01tq8z4Argb8nl680HiE6KzQaRkk4N+xhd0bqHi8Wp+y1QK0fnaEn5YWY97kIn1OWj4bgbmfTJfBrdwEwkNybjeqGHJY/UEiHDfViq/m0S00LF6Kp9zcaUCOy20vM9YHzso+64ZF1cgYKfk0Xp8XIG+NqoeLcEeQCH3eQe2R/WM5aP+0SJsJ4jfMTOFG9oYIg6H7diPEZ9DZmzvhRjjhI2qYjsB4hBw4XqfSci0PGfGxSjtMgsHuz/hXDhMOBwm/N/nGPC1UHkfCq7nKrG+j0p1TYYP17D+RYkg8QrQXllCcxdxkd62EyQeAZpfb6GfSCHcv6ykfZBR2ZVsOnAS55/DePxhPP5zvHfsCGvu1xIp+HYJtkNDjCXT/tNCGp0BrkifjQ1rttB5lsl1+eglFgOLTQZuCKfsPF9mxj3IFenfVYS5wcUIEeaKLFqIwu7jXkZQGv7QiUQMbzs5eR6lLidHBlFYbDKRgEqlmhp69PcaGGXF/X6ISMMfOpEYlfNNA8lcX9LbdoLEI0Dz6y30Ey2E+5VCGr3EpdPrYnIa7iqwkI5S0FmP+Ttf44H8JdTW2ZC83QTPcu2O2mn3Epfg2yWYX/cRy4h3C+bVNvq5EgHcLxby/D6ZePR2+YjpsQKyZnKJj+Y1RezvI35eM+stbQyjUqm+WkKc/NBBtPTpGj43S0R8DKWjDtx9xNYlcagLheUPFZDMtQjR2VDChl0yV2RQorGsjPZTxMFHbxcxpa8QSeeSAQfPl5np5AoM2jGvqaeXODSZafQSBxeNjQ6CqFSqqxPi5IcOoqVP1zAqROfrlTR6uUIB2v+thV6iyez/aQntg8QhQKfXRzxGjppZX+ciLl4XnUyVIToshTQ6A8Rt0I55TT29TJ3+bokxZiW
RwBdCdL5eSaOXKxSg/d9a6CVaCPcrhTR6iUun10U8Ro6aWV/nIi5eF51MneHDNax/USJIvAK0V5bQ3MVN7xZuJHtKSBUEBEFAEAQEQUAQBARBQBAEpn1tHqbSeqQAo24vY+9b1RiINoT0phkfkfSUNXn55FyYcDhMOHyOT7r3svE+ovhwdgdQCiE1VmIPoKBdZuGIfI5wOEw4fI4ByUKBFqVjW7EdGCIuf3Gx5dElWI+hoP/BXlo25aJhrMBbFmqOoXRfJS2+TzgXDhMOn+MTXwuV96EQ2G7G9j7xu6+SFt8nnAuHCYfPMSBZKNASJYDtWDdKIaQmM21E0YpsdHzEJ/8dJhwOc+6/vLQ8I6IlDgEf0iEUyjZspGB+EhouuVWD9u5itu3cQeF9ImXrt9HS5uSjASfVd6NSXb3zLvbUWQkSSUveT47w7h/DePxhPH88yfafFJBMJBcNNgfDxEtL3k+O8O4fw3j8YTwfeLGU5jKGJNEzSIRuPtgVIJL4eDXLs/Uk3MolGmbMESm1tlCaLSI+tpFNO47QcuwTfrw4iWjBfTWY3w6gpGf5Zifv/jGMxx/G8+dP2LNzK+JclLxm1r8iMcJVyiwhJ5MJmCnVCRh1AkadgFEnYNQJGHUCRp2AUSdg1AkYdQLGBhdXL0C7pYT2QZTmlrHhwEmcfw7j8YfxfPARr5oLSUOps66QxqMhRumZLxpQ2OuiByW5ay+xtfBBDwr977fRS6Rq8u7VoFKppk76XQVEOuT1MSpEz3Ero7TkZOj529CS95MjvPvHMB5/GM8HXiyluYwhSfQMotTXQvP2ANHSVm6jyXMOjz+M58/neHfPVsS5xC0hu5ofr88llmBPG7sbytmwbAEP/L8CxjwTmy1W2p3dDF/g6qUUsGbPSZx/DuPxh3F6nGxYqSda58+sSGdQOu+i+SdmOlFKNm2k9v98gtMfxuMP4/Q42fSESDKRArRXVrG/j6v2iGhiBhcF91lo8KKUXYnlN5/g9Ifx+M/x3m9aeCQbheCbZvZ3oVKpvkJGvPVs384YOekZXJREzrKNJBOpDem4TCy9zhZ6iVRG3r1JXJRLqT+Mxx+maT1jrDkQxuMP4/GH8fy8mGQuGvHW81KdCyUtOWv3sueDc3j8YTz+c/z6wA5WmbQoDNox/9RGP1fLwGKTgYtCuH9ZSfsgCsn3W9h+7BwefxiP/xy/3mMhLwUl71b2Hx4iPnpE8xHe/WMYjz+M5+QAr64VSSZKq5OeT1GpVFdhxFvP9u2MkZOewWWfSrTX+VCYW8am33yC0x/G4w/jOfkJe3ZsJIsoXU7kARRGnDYaJaJoyVm7lz0fhPH4w3hOfkKLtZqcFOIkc+j1eoJEmVvGhgMDOP1hPP4wzmNHWLtUz1Qa8VppfDOAQkoBa/acxPnnMB5/GOexI6y5X4uCt4bdh4aYEqfsNG6ViJZzZwYzuORTifY6Hwpzy9j0m09w+sN4/GE8Jz9hz46NZBGly4k8gFJfC83bA0RLW7mNJs85PP4wnj+f4909WxHnEieZQ6/XEyTK3DI2HBjA6Q/j8YdxHjvC2qV6ptR5F3vqrASJpCXvJ0d4949hPP4wnj+eZPtPCkgmkosGm4Nhbm63cBPLfWIHXvcOCucSQxIFL3/CSd8R9jZtpHJFLhnrd7DtBwaSNFyiIWl+Iebnq4kmBwZQON2G/YUACvdZcNg3Is7VcJEG7bc3svfNarRECmB9R2KISVyQsa8pxHwMBe33WzjSVIieWGQOvuFAQVvJwQPbKL47CQ2f0ZB0dzHb3mqhkEg+tu6WCBEHbTVHDm+j+O4kNHxGg/bbG2lpLCNa4NQAQ0QIOWl7IYBSLpZ3DmL5bgZJt/I5zWwDxS8fxPF8LpO6ECKEUvefZGK6vYy9riPsqK2keHEuGVoNKtW1GDm+l8YuFLJ+1sarT4mkTeei6XpyntpL3VoDCrtsSH3EJa/WyatPiaRN56JZBhZttrI2kyjd9P8XEUIwiIJ
flhkmhukiaw4cofYFC8sXi6TPSSKBaD4ONTpQymXNgY/YVJpL2nQuujUJvVhN7a9aWJSCQnB7PYf6iEtWaQstH4Tx+MN4/GE8v6kmnRtAVws7W1HKttD0mx08kq0n4VYumpVBXsVe/t1aTDKRAjS/3kI/o9JNJaQTYXAvnV1E8NHZFiC2AO4PZUYF6DwqofCYyPxEVCrVlCgkLRWYb+IRRgXbnPTyBR8f7CVCCd/IBP4HJHN95dU6efUpkbTpXDTLwKLNVtZmEqWb/v9Cob/DTgdKyQ+38O8vV5KVquFzt2pIM1VT+6sWFqUQJw1Za1t4tTSXSfW52L+9CvOKBXw7ZwHmBgfyp1yhQizvHqTUpCfhVj6XkJrLIy8fwfKwFiUb+w/LRBqWbDR2oZD8cAtv7LEg3p5EAhclpOay/GcHqVufi1Ibjb+SGCEOKQWs2TOA0x/G4w/j8YfZcH8SF8n8dpcDhZRKXv3lNhZlJpHAZzTMyCxmQ2MLIpF87DwgMYJKpfpSuxBieLCbjqZyfrjMTCdRUjaSd6+GLyTcI7I8BYWOVol+ovlw7/GhUFHMt2ZxDYb4bbOZXiJpWWR1sn19IfpZGi7SkJxdxtr/z8GabJQkM/ucIeKSXYnlN5/g8Yfx+MN4/F5WZXLRmTbaXwugkG2hzrqRnDkaLtKQbNpI3avVJBMpwO5DEsNMLq/2CLUVImnTuShRS976HawRidLN6TOoVKp4XQgxPNhNR1M5P1xmppMoKRvJu1fDZTMLWNt9kpY9e9m0thIxO4NVL2xjeWYSCVySmIR+sZmKCqLInD5DhBB/kLYQRClrvYNX1xein8VFiUmkP7SVV20WsohDn0S7hFJKMZZf7eCRbC0JXJQwR2TVz49geVjL1AjxhzYzvUTKZW3zQUpNehJu5XMJc0RKrQ5KM1HYb99LP1crxPCnMp37zDz9YAntg0QpYFGenstmFrC2+yQte/ayaW0lYnYGq17YxvLMJBK4JDEJ/WIzFRVEkTl9BoX+DjsdKCU/3MK/v1xJVqqGz92qIc1UTe2vWliUwuT6JNollFKKsfxqB49ka0ngooQ5Iqt+fgTLw1qmysjxvTR2oZD1szZefUokbToXTdeT89Re6tYaUNhlQ+rjpnYLNzHX6+UUFNZg/3CImDRJ6O8WKfyBhW27nXxUK6JhLM2MrxHN95cQkYZcEjaUCn9URu5tjKG5v4iqO0UKKzayzX4QZ/cAnzQWksTEDq4voWRnAIX7LDgai9Ezjj4XbYdRMKwro2A2Y80tpHI9CoE3XHQzOfH5SsTbGCPJIFJIlNNDhIjwoYsWojxlpvo+DWNpyK2ooYxJ3JZEKkqO0nnMK6ii/i2J7kAIlep66Tlej1IZq4oMjKUha0UlOURqw/1hgMlVs+phPWMZyLqfKD5GzhNBQ0ImCr0N+TycX0LDLge9p4YY4Qp429jZhUJyhYVV2RpimlPMj/+1DKU22jtkJpVpYa25mPRZ3HA6D2+ll0haVq2vJms6MaU9ZOXHK1GS7Lj7GJVpIj+FCD463pe5rM9LRxfjcn/YzTCXfOqlsxWFR0QTM1CpVFNqZgbpCxnV1UZPHxd1OTkyyKil2cxLBFL03MX1VM2qh/WMZSDrfqL4GDlPhAA9v5NQEil7qpg0YphTzPefNBA/PXmbnfx6zzYeEfXEZbCb9roiVvzjEpq7QsQr3Wxm0Rxi0LPoyRrSUep4v5thvhCgY58NpQLWPFNMGrFoyFprZW0mCsHX2vjDeSYl/rONUpOWBGI45aLjKArpT5aRN4ux5hSyogKF4C4XMiqV6suicZmAUSdg1AkYdQJGnYDxf03j24YFPL3JRifRtCzaXEVOIqMSRfKfNKAgOXD3odQlcaiLCFpKRRMJXIOBNtp3oSRaWPOQnpim51L64lbSiRSgSXIywmQMrPmXrSzKTCKW4d9L7EdJLCsjazpjJCwsYuV8EfGxjWywHqTp/wzw3guFzGAy1ax6WM9YevT
3EEUieAaVShVD4zIBo07AqBMw6gSMOgHj/5rGtw0LeHqTjU6iaVm0uYqcRBQSZupJNxWyfP02ag98xNqFGsbSkHgbUXyMnCdCNx/sJUolZU/kksBYCdmVrFrJpIIfSrhRynnGzKI5xKBn0ZM1pDMVfLi3o7SyhuWZjDU9l4ceF1GQJHoGmZRUmYpRJ2DUCRh1AkadgFE3jW9nzKO0cgsdg4yR9RMLi+eikDBTT7qpkOXrt1F74CPWLtQwlobE24jiY+Q8EQL0/E5CSaTsqWLSiGFOMd9/0sBkgh9KuFHKecbMojnEoGfRkzWkMzV6jtejVMaqIgNjachaUUkOkdpwfxjgZnYLN7nAe/WUfCOb8j0yV+QvQ8jvt2Gvq2LJE9uYjHzCiZIBU4aW2HLZ+MER9jZaqHy0gNz5WpI0TMp1zEU0rcFAxm2MK/QnLw6U9F9PJTYNCwyFKAS8dH/MpHIz9MSk0aBhYgH5IwIoFf/vbDSMY7YB8btMLEmk8Fkt0eRDVmqusIZAAAAgAElEQVSK81mQOg0hdQH5pTVY35LoPh1CpZoaMrKXKHpmzyK2uQvIQan9hMykFmejSySmRI2WiRkQywqIFuyx07yuiJL7voZJN4/S1VU07nLQeyrERPq7JYIoLV5oIoHxzbingEUouX/nI8jE0lcUkJXIDUhGPh5AqYS8ezWML4m7xGKUJNwfBhiVi/FxLZHcR10EuWj4Tz7cjEpev5W1mYx604nMRSPvS+wmUiFZdyahUqmmmp75Ji2jJDreD/CZ/vfb6GVUujGDZP4GFmejSySmRI2WiQ3QdxSlFJG7bmdc6fcUcKWSTZVs2HkSp8dL7csbeWRpLmlMYrCNhlVltJ8iLnn3GBhXpol8orT76OeS8x/R04rSwkJy5jIBAznLtCi18EEPk6hkuahlPCOyFwklXWoqsWmYd2chCoNe5AFUKtVXVNZ6B88t1RItfWEJ6URqQzouE6n//TZ6iZBSxbdMGq7FyJ+8SCjlLBJJYwKZIvkpKO11ITOJzBLysjWMp7/PiZKBrDu0xJZL6ZEj1L5g4ZGHCsi6XcuMRCa3OBtdIjHN/gcRlUp1fWStd/DcUi1XYuRTmV7JTvOzS9j8SyY2KHNyEKWHReZPZxxJzDcVMpnTsoSSlpy7MhhXpok8pkDfR3xAlP+ZygxiS8sQUbLT9zFTLvnhFixPGUggfiOfyvRKdpqfXcLmXzKJAfqOopQictftjCv9ngImc1qWUNKSc1cG48o0kcdUkJG9RNEzexaxzV1ADkrtJ2RuZrdyI1nRwsDuYrTEFhoaYijgpOW5Kmr2yIySsT1SQobPSfXdxBT62If0H06kww6c/yHhOsEVCf3Fh5Ke1Nlcd4HtZqyPiWy8T0MsQ6dlojmKUxGKiZMD+WPg60xAyzQNsWlTWcAkLoSItmCOlvGlor2dSWgQf+rAcqwQ83sBYgp0I73RjfRGPZ/RL66ker2Zym9rUamuXoDgUaKYKdWZiVu3TJBckpnAdA2JxDZ7jglwMJG0lduoPZ7Phl0yscl0tlrpbLXSBCTPL2ZljZkVizOYgdLp/5JQKmT+HRomlJLKPKJ0y5wGkhlfTnoGV85Ck38jWVxPAYJHUVqazbxEJpSctoBo8sAAoOUL879ZRjJbCHJJq5OeT4vJmxmi57iVSIuzK8n67xro4pI2OrssZGVCj7cFhaVF5KSiUqmuA/1dJUA9X5COexl+KJvOoxKR8u4y8DcxXUMisc2eYwIcjC8Egyh9U89sJpCiJwdwc+USUg2IKw2IK2EDMDLYTc/vnfz2aBueow46+1AatGN+pYi8lwuZwUQKmTeHCehJWwq0MmoQznPJmQD9RLl3AWlMbPYcE+BgVIDTg0NAEuNaaEA/k3ENn5GJJlWmYqwkTg76B4BUVCrVV0mKyKp/3kbZQxkkEEOmyOJM6O3isg7JRXClnmQ+I+NulYiU/mQBWVyb4TMy0XIy9EwslbRvAq2MGhzg9Ke
QPpPxLTSgZ3znz/pQ0jN7FlPrH5KYQWyJ05NQqVRTLEVk1T9vo+yhDBKYSIhgl4T7uIT7sBP3URf9XIG/hhghij6VZMY3O0XPZM7/NYCSibQUJqBFtxA4yrU5E8BNlDoTxjri1vOfAcjWMjX0iOYd/PgJkWQmEiLYJeE+LuE+7MR91EU/VyIEgyh9U89sJpCiJwdwM77zfw2gZCIthQlo0S0EjnKNAgSPEsVMqc5M3LplguSSzM3pFm4imqQktPMLqN79EUfWa1FyUfOLNoZQGvrQTk3BPKbNyWZJcRX1b0i4TnAT8WF+vgWZvycTei1/QxqmzWByt+WyUerG21RNwe1MSj5kpUpMxfSjNmRUqhucPpVkroUe8eWPeHfnVhbN1zKZYI+dxvIFPPxP9fSe5froCnGeic28TcOXXe/ZEJES7hFZTCQrHceHAB8f7CVCIfPv0KC/u5JRPjq7ZcBHZ1uASHliLsmoVKrrIeGObEQivCnRM+Cls5UIldw1n78NfSrJ/A0lTmMGUyMhJYOsxWWseWEvTR2fsMdaSRZRdrXQMcBN4/TZEBOaOY1EVCqV6hqlZJCzUCRnZTUbrE5+fewIax/KYAbjMSCWFaDQuhf3ABf1SbQfJYKBxQsN3DgGGD7LxG6bRgJ/Z//P15iBSqW6blIyyFkokrOymg1WJ78+doS1D2Uwg/EMIe+r4em8aTzwnSWYN9Wz/6iLfq6/hNu+xtSbRuJMvgT0ZC0UyVlaSenLB9nzwUlqK0SSb2UcQ8j7ang6bxoPfGcJ5k317D/qop+/gcRpzGCqTSNxJqopcCs3JQ3ikxbEunIkImx30t1YQC4XhY5toSDXjIsot+dSeL+I+L9NZBuyMQzYmCaauTHoKVispe2Qi8sOmak/UMS2ZUl8Zfw3cUrC8IOtHPzBVoZ6JNoOtyHZHdiOyYzH9doSqr5xkoNP6FGpvtw0pInVWMRqnhvw8duje/EccnDkcDdBYgseruFHrxg4YBZJYIot1DKbiYgkz+JLL+cftCgkmsirgObtXLb7uJe1aT6ODDJqYQFZqTADEyJWJC6Sft/NsHGAji4iiIj36lGpVNdJagZZmSB1cUkL7l0ajhBhsQn9TL6cLjCJAO2rUzG3EqGa7Se3kpPIBJLQP7SNHw84KbH4GOXg5CkglQnInD4DpPJ3lzYriQnN0TIDlUqlis+aA2FKs5kSafcWkkcbHXzBgXQ8wKKHtPR32HETYWElYiY3ED3Js5hQzj9oUalUXw5rDoQpzeYahehsKKC0zkW0tOxCvrVQJOuebNLvNHDaPo2KOqbWhXNMvXPwV24aonWA2oe0XJsQnQ0FlNa5iJaWXci3Fopk3ZNN+p0GTtunUVHH1LrAdXAO/opqCtzKzUozjSSidSN/DLlfB0ISlu+acRHhvo0c3GOm4OsaFIaYlOY2A+BjlAP5Y+DrxOR7YQGV75kwLRMRcwxkz9ejTdIwMS3F9iO0LJOpuSOf+gCXBLA+Z6Xk/o3kalDQ3JZKtI3/EcbyLW4YmttSieb9kwz36YlNRv6QK5Y0X6R4vkjxU1vZERpC7nHi+g+Jg79owf5hgEhtz9nxPbERAyrVldKQkAl0MWpxC+/uKCaNG1dCqgFxpQFxpYUNF0IE+3z84fcH8bTa2S3JRAput/HbJ0XEFD43+x9EQGKUg54/hViUqmFcAzI9RJmbygwmkkRiIjcoLckLgaOMavVy8nwxyYmMK/ifXqLpU76Gkob591bCdiuX7XUhzffSy6h0MZs0/q/UDLIyQerionYfHfd4cRMhs4D5c1GpVFMqlRnTucRA1v1AF5cEaKrbQqT0ezNI42agISET6GJU60f4gWTGMfAREhPRkpahhdYAo1r4oGsrOdlManaqHvARqa8/AGgZn4/+/iHITCK2Afp/h1KmhkQumaUljSjHP6KfXNIYn7/PiZKB2ckaJnTbNBIYX8L0VKKVvhNmzTdRqVSqazO3iOUry+nYxWXSIYn
gQ7m4WyUi5T0kksa1mzFLTzR3t0xptp7xyfT/DqXMVGYmMqEZ06cxkcTpBsDHKAf9A0AqMfW+toCXnCay7hfJudvA/Nv1JM/UoFKpbg4jTgvr61xEynrqIHXPFJCciMIwk5iexGyifCjTTy5pxNb/n91MJnG6AfAxysHJU0Aq4whw8hDXLlFDOtDLKPHVk9Q+rOdGM+K0sL7ORaSspw5S90wByYkoDDMZDQmZQBejWj/CDyQzjoGPkJhY4nQD4GOUg5OngFTGEeDkIaaAhoRMoItRi1t4d0cxaXw13MLN6gIxpJJ0G58L/UcbWwIoVD9vpuDrGqKFhj9hMhl3ikSTPpCJTcb7Xjeuwzbqf1TCktwFpH5tCbYTTCj3eQe2R/Vwm0jlvxag8L4Zy06ZaEkZBkSUXN0yN5KkOXoMKDne8zHEOE77kN7h2miS0N9dQPFTW2n5wMmOxSgFvHR/jEp1FTJIX4jSIS/+89w8btWQfHsu4koLG3Z+xPYKLUp2+j7msrQMkWSUDh11MsL4gsf3IqGUd3cGM7hZ6dHfq0WphY7jIcYXwH3IgVIB6XckEW3GPSLLiTC4l52/cBApJz2DiwxkFWi5bHAvO3/hIFL6CpF0VCrV1Eplxkwu099dyURy0jO4bJaWNG5UevT3EKWNzi7G1fv7NiajzyxCKcCuPW0MM7nTAzLR0mYlMZndR52MMI4+L+5BlO7JII1LEhcwfylKRx24+xjfeReetgAKKQWk3841mXGHgRyUPviTjEqlUl27JHLuL0OhdS/uoxLtR4lQgHivnqmQcEc2Ikrudol+xjfilTgyiEKyaEDPtdGni0Rzd8vEJtPj7KbzqI3mTSU8vWwBD2QsYX8fKpXqphDiD9IWgkSqpuKZApITiRLi/F+Y2MxU0jJROiTRc4ZxDNHjdDCZtLkmonX83se4upx0MAVuN5CDktQjM8KNJsQfpC0EiVRNxTMFJCcSJcT5vzAJPfp7iNJGZxfj6v19G5NJm2siWsfvfYyry0kHUyGD9IUoHfLiP89Xxi3clEK4dm7DQZS79aQm8bmh0zLRhv4SYiwZx84WJqMxmChDSXrOivQXxggds2M9jNLdBWTfzoSW3J+Lhov0j5qx3I1C23P1tJ1G6fZsCu5GQXrOStsQN467TRRpUXpjK9ZjIcYK4fo3CzYmcSHEUJ+PtresmNcUkf+NImwnGMfX+JqWKBo0t6JSXQUN8+8pQ6me5l3d3DhCDA9203nIRrOlnIplC2g4GiI2DTOTU1HSwv9gVGYuy1NQCG430+wNEdMpO6/8iwOlAsR79dzM5n+zjGQiBWiuq6fzLDH176vmlVaUxEJy5jLWLBM5S4ngo7eLCJXk3K3hC/rMIkb56O0igoHFJgMqler6mnGHgRzGU0zWfA2XJU5jJjeqJObfW4iSj52/sNNPDKfs2Cw+JjNjYSGlKSgE3yzjpbdlJnTKzs5f+FAqQP8/NUyqyUKzN8RYAaSfm3GjJN6bzQy+oCVLLECpjcZX7PQTS4jO7dU0dqGQvFLkrkSuzdxs8jJRcG+10vEpKpVKdc1miMWsIpKDxme34CbCyjLEuUyN1FzyRJQkM437ZGI666L5n830EknL8oUmErg2CXeaWI6Se6sV91nGGPHa2X0UpcwC5s9FpVLdFIYIniLKEMNnGeuUg/17mYSBrAItSjaa33Qxwlgjv9uKbReTmnGnCRGl3l9spf0UMci0/1sNvUyBxGyyVqK0vZ59J7jBDBE8RZQhhs8y1ikH+/cyiSTm31uIko+dv7DTTwyn7NgsPiYz404TIkq9v9hK+ylikGn/txp6mQoa5t9ThlI9zbu6+aq4hZtKiKFAN20vFFH4nItohsdEDIzPtqaELe8FCPGZEEM9bdQ/kk/JzgCTml1A8bNaFAL15N9fhf39IUJ8JsTQ+zaqvm/Gh1LBjwoxcAU0uVQ+X4mWCAEr5u0uQkQyULimAIVAPUsWV2E7FiDEJaEhut8xY7r
DRH5pDfVvOHD1BBgK8TeQS8GGXJRcmL+7BPM73Qxd4HOh0z7sPxIxbfYxsRDSc3q+ps9mSXEVW7Y7kD50UL64iPr3ZIZCXBY63U3bCyVUvoHS/SIGLSrVVZmxsJhVKSh0PCvy9GttyGe4bOSUi91Pz2PFiiJq66y0Sz76Pw1xvfXvKuLbhgWUlpfTsN2G29tN89NLaNjnI3iWUWdlOndV8ZLFh1IJ35jPqESRh2oKUHLRuGwBG7ZL9J/logtDyFI9G75XQvsgCunrzSyey00twVTGGhElr5nS7xTR7JQZucBFZ7rp2F7EDyvtBIlkYM0zJaQRi5acxYWMa6mJ+TO5bMadJkTGkVJEViYqlep6m5tNTgqxLRSZn8JNI3lhGY+koBB8u4QfrrPSORDicxdC9Du38PSDJUjEIVEkf00uSgHanzZRscFKx4kAIxe4bGSwm46mcioeLKF9EKWVZYhziYOLxrIlNOzzMXyez42c8dG+qZANbwZQKmPRQi2R0pZWsyoFheDbJfxgRQ3tXUOMcNHIgIv9P1vC+joXSoU887hIAtfKgPh4AQqD9TxdUsV+b4ARLjk/hHzITGmeiYp1NTTvctB5IsDweVQqlWp8iSbyntISqb9PJtLy+0VmMLHZXy8kWs+fZEb4v84P0d8XYITP6Fn8RDXJRArQXmmi4md2es+EuChE0Guj4Z8KafSitLSelSYN12xWAYue0qIwWE/F96po7xpihM+EGO6y8dJTZnpRyisrJB2VSnXzsvHSui24B0J87vwQslTPhu+V0D7IpLLuryELpc66Qp6ucyCf4aLzQ/Tuq+KH391CL3FILWD5Y1oUBu2Yv1fObm+AES4aOSXR9E8mzK1MkSS+9VA1yURqo3bFEpqkboYvcNGFEP1eK+a8BVSsrqKxyU5Hl8zwWf6ObLy0bgvugRCfOz+ELNWz4XsltA8yqeSFZTySgkLw7RJ+uM5K50CIz10I0e/cwtMPliARh9QClj+mRWHQjvl75ez2BhjhopFTEk3/ZMLcypSZsbCYVSkodDwr8vRrbchnuGzklIvdT89jxYoiauustEs++j8NcbO7lRvJnhJShRKuirYSy/cNfEGbYcKAAx8RAm2YxVTMXA0N4horxW8UYQ8w6piVEoOVCd1nwfKoniuVtKyMmvus1BzjMt9zFloePUjZ7Vymf9TMxsY2trzPqGNWynOtlBPDCRfSG/V8RltxkO7GApK4vgxPWKiuzac+wKiAxJbCBWzhSmkQn6yn8IUSHEQ44aBGdFDDZLQUP7EEPSrVVZou8sjGQpqfdjAqQMeLS+h4kRhkZKeD3Xwml7W/cbIqk+smbamZUlsbTV2MGpRorsymmcll/aSEuxJRSHvYwppftdHoJYKMZMlHsjCxbAs/fiKXBG52ehbXWNgvmekkQp+DhhUOGphY1norq7I1jCf57gJycOBmrHRjBslESM0lbyFIRxkjeWUu81GpVNdfBt8oArYzRrIpmzRuIrMKWFVTwO4NbUTq31VF6a4qrlb6Y1bWvJtNo5cIAdwtVbhbqohPLmvLCplBnAYlmiuzaWZiebUbEWehNF3k+5uLObTaTpBRQWc95u/UMzEti6xbWZTKlEh7yEzpL9to6mKU18rmZVY2E0OfC/euej6T/NhB3n6hgBmoVCpVLBpyvlNF8mtmgsRSiZiXxGQSNRqiSevmYVrHRUtb+PXPi0kGEhbW8OOHW9jwdoBRAdyvl1DyOhNLKcZiLiaZqaAh53Eri3YV0T7IKK8V83esTCjbwpqH9KhUqpuFFr3RAK0+IgUPm6k4bOaqZFZSUbGViu0BRgVwNxSxooGrlETeagt5b5bTQYQ+G7XLbNRy/SQsrOSZpfWYWxk12Ebj99toJIa+btytVj6XvZWWA9Wkc71p0RsN0OojUvCwmYrDZq7KrAJW1RSwe0Mbkfp3VVG6q4qrk0Teagt5b5bTQYQ+G7XLbNRyHU0XeWRjIc1POxgVoOPFJXS
8SAwystPBbj6Ty9rfOFmVyU3rFr4UcrG8s5WC2Yy6uwxLhZa43F5G2fe1KPhkZKJ8vRDbOxZyuQJaEcsr1Rg0XAUDlc9XoyVSG+aX2xgiwm25mLdbELVcmfssOGoLSOJv4DYRyzsWcomHluLvFzKhucVsfbeSXK5c7lM2LCu0qFTXIu3hrdSu1HNltCyytrAqk+trei7lr2wlL4Urlnz/Vn5caiCBKIkGSn95hNJsrkx2Ja82biRrOl8KCXdt5NW3NpLFlckqPYhlbS4JTGCuyKKFxKAl/x4DSnr092qJZbHJRAIqler60zDvzkJiWXxXBjebtJJtWB7WEpfs4v+fPbgB7IFAHP///nzIlEIfrHF5GGKU1dRiyqqVPMRxHvI9XH1D3b9Wfbsv565w3fei7kR3FdcldHeZkoeKKCky5eEmE52nyloqa5ipxBz2b7/u020jRp4+vF8v2iVzaFEJ9P3bG/RtzhFK4sa/ptHnQsokvlUK1Ti0at3T+FXvWA6kWsfx/GVEL6pxOGK44rez+U2XWI6aSkn0//0wEqM5PM2H8fCQDpyDJB1E8w787EIOqNpdXUmszCFVuyiFKziIl7P4lLAYUh6cx9DuMRyW6A7cM3E87Wpz9NTsym/GDyOewxCdwh3/N4DGUUiKII27DePGaMqkVs9+tIumhLUbsiipIom/mM4dzSmTat17kUIZ1OvHr8b0ohplkUS77ikcHbG0GzKNzvU4PNG9GPbnATTm+GjcbRg3RlMmtXr2o100JazdkEVptXqPZlj3GMqkeS/aJXNo9frxqzG9qEZZJNGuewpHS63uIxnRM5bDE0O7MWn0uZCIFiTSNezKyIWzGdyyIiVVpcOIeYy7JZaDiek0jEWLxzGyRzdKeGEemVvYT8WWg5m3Zhz9GnJoLVNJmz+LwS0rcqQqthnIqB6UkPPEEMYs2UVxFVsOZtb8NAZcE0NZxPYYzfJXBpN0NsdNxZaDmZc5mq4NOYgYOjwym/F3NOdQYjuNZnbGaLo2pIxi6TpiEbMf60As0g8VS8ojy5kwpCu1KIPoFPqMW8SwLrEcDxUuHMCjM6bRp1UMZRND4q1p/GXMABpX4sBCKdzxwgZG3N6BWhxKDIm3pjHludFcUZtTyjnJw5iwZBp9UmI5pOgU+oxZzV8e6EAtDiWWuJQE9tebZheyn7jmvdnfAK5oURFJx0e1C1rRmNK6EndBRSJPLO0eXcSjt6ZQjYNonsqjfx7GlZUpm1AKdzy3mmG3plCNsquVMoBhC+YxqE0sZXXlvbP4yyP9qMX3i+87jadH9aIW36cisb3TmDp7NO3iYjikel2554U1PHprAhU4uio0H8yjU9Lo0yqGsqjVcTRpaYOJr4QkHUICiT0S2F8MnZNbUYEyqNebfr9M4vvtpIRKTej86BrSHuhFbDSHVKvjSCa8MYs+F1bkaKvQfDB/WTCOzvU4tOapDJsyi77NKyIpwoQ6cM+UcXSux0HEcMWvF5H2yEjat6OEeYsy+ZJSKiXR97nlDOoYy8FUazOSxx68kzjKplaXNJ7+6wASozmIJG78axp3XFOVo6Z2V4bOXMQ9HWMpi2qtBjBiRhrtanP8hDpwz5RxdK7HQcRwxa8XkfbISNq3o4R5izL5ktJiaffoIh69NYVqHETzVB798zCurEyZ1OqSxtN/HUBiNAeRxI1/TeOOa6py9MSS8shyJgzpSi3KIDqFPuMWMaxLLJEuSKRpmERKmxS63j6YcbNXs23NNAZcWZUDOrsJ/SasZtO8cQy4JYUmMXwrpgkptwwjbfEmsmYMJqk6VE1oRVeKG8/4F7I4kIpx/Rj3/jY2zB7HgFtSSLkohrCYi1JIuWUA42ZvYNvi0fSKq8gPE0Ov+0aSQHGZDPldGlmUVDGuFyPnZbFpcRojb+9KSstY/iOWpDZdSR2RxqKNO9nwfCoJVTnuKl6SyrQ1m1g0aRj9rmlCDN+KbZlCv9+l8cb7a5j1iwQqUjZVL0t
l2vs72bQ4jdG/7EdKmyRiKaZhEik9Uhk8YRarN29g2i+TqIp0tFQl/vZpzFi1mkcfGUDn5BRio/lOtbgUEnsOYOgzq5mf8Qb3tI/luKrdlXumbGL+glkMvSeVlOQUYqP5j+gmJCb3o88DaUxYlsUTv+1FbCUOrnwsKUNmMWPVap4YM5IbO6YQG82/xRDbqis3DkljwrIsnvhtL2IrcWqq3ZV7ntnA/AVvMGxIKimtmlCNf4tuQmLHVO4Zs4g5S97gni5NqEDZNG7Vm8aU0r0V9aPYT4W4VrSjlJtSiItC0rHQPpbqlHJhK66glAtbEVuTCBXLFb99g6lvz2LQTV2Jr8e3opuQ2DGVQeOWM+eF0VxR+0wOS6UmtPvtG8xZtZonxoymT88UEuNiKK5aXAqJHVPp+0AaTyzYxoxnRtKuYUUOT0Vie45jRuYbDLqpK/H1+Fa9JFJuGsmI17Yx4YGu1CrPIZ1zcSrD3shizmvTGHp7PxKbx/Kdekkk9hzA0GdWM3/BNPpcXpVjpULDXtwzJYs5M9O456auJDaP5T9iiU/uyo1D0piwbCcznkylcWUkqUwat0nlCkqJvpMrW1WkbCoSf88i5kwZzY0dk6jFv0U3IbFjKveM6UYspVWlcd80pizZRNq4cfTpmUJ8Pb5Tq3kKnW8fx6MLtjHjyQHEhzhmKjTsx9C3tzHjmXH06ZlCYlwMYdXiUkjsOYChz2xg/szRtGtYEUmRqULDfgydt4knHhlA51ZNqMa3qsWl0PmXaUxYlsWjdyVxDlWJa9GVEiaPZ95H7K9SAjc+uYE5M9O4o2cKsdF8q14SiT2HMWzKBqb+dQCNK3FYarUZyRMLNvDoA6mkNI/lWzHEturKjQ9MIy1zEYPaxBLFURZKos+TG5i/YBZDb+9HYqsmVCMshthWKXS+fRyPLtjGnCkjSanNcVehYT+GztvEE48MoHOrJlTjW9XiUuj8yzQmLMvi0buSOIeqxLXoSgmTxzPvIw4glit++wZT357FoJu6El+Pb0U3IbFjKoPGLWfOC6O5ovaZHI5abUbyxIINPPpAKinNY/lWDLGtunLjA9NIy1zEoDaxRHG0VSX+9mnMWLWaRx8ZQOfkFGKj+U61uBQSew5g6DOrmZ/xBve0j+VUECj8BpL0PVq0SqZIlXOr8dBTLyBJJ8od3ZMpUr16dWbNmI4O38233Mradeso8tBTL1Dl3GpIkWjlnwL0fZgS7phZSN/mSIfl3lt/wvZtWymydFE6Ojw333Ira9eto8hDT71AlXOroQjx0Xj+54r+vM1/NB6ynLTbE5Ai0e9/dSsff7iOIrNmTKd69ero4B76w8O8+NJMitx+7+9pdmkrJOlEmPTkSN6aO4Mio0b8niuvbEVZBJEkSZIkSdJpYhcZE4fwNsUl0D45AUmSpEgRRJIkSZIkSaemgl18Z0cWGY93Y8gTORMVthgAACAASURBVJRwYW8SL0SSJClilEeSJEmSJEmnpK2v9KZt6nQOJuWO3jRGkiQpcgSRJEmSJEnSKemcUCwH1Xwk/brEIEmSFEmCSJIkSZIk6ZRU4exz+V7NU3n0zwNojCRJUmQpjyRJkiRJkk5N5yfQp1UTXlm0hq0UiSW+YwdS2vemfcckqpVHkiQp4pRHkiRJkiJI/D2FLLsHSVJZRHfgnikduAdJkqRTRxBJkiRJkiRJkiQpQgSRJEmSJEmSJEmSIkQQSZIkSZIkSZIkKUIEkSRJkiRJkiRJkiJEEEmSJEmSJEmSJClCBJEkSZIkSZIkSZIiRBBJkiRJkiRJkiQpQgSRJEmSJEmSJEmSIkQQSZIkSZIkSZIkKUIEkSRJkiRJkiRJkiJEEEmSJEmSJEmSJClCBJEkSZIkSZIkSZIiRBBJkiRJkiRJkiQpQgSRJEmSJEmSJEmSIkQQSZIkSZIkSZIkKUIEkSRJkiRJkiRJkiJEEEmSJEmSJEmSJClCBJEkSZIkSZIkSZIiRBBJkiRJkiRJkiQ
pQgSRJEmSJEmSJEmSIkQQSTqIcuXKUWTv3r1I0omyd+8ewsqVK4eOTLly5Qjbu3cPknS627t3L0XKlSuHDl+5cuUI27t3D5J0ouzbu5ewcuXKoUMrV64cYfv27kWSTpS9e/cQVq5cOcoqiCQdROVzzqHIji+3I0knyldfbCes8jnnoCNTufI5hH31xXYk6XS348vtFKl8zjno8FWufA5hX32xHUk6Ub76Yjth55xzDjq0c845h7CvvshHkk6UHV9uJ+ycc86mrIJI0kHE1IyhSGFhIbmbPkGSToTcTZ8QFhNzHjoyMTHnEbZ50ydI0uksd9MnFBYWUiSmZgw6fDEx5xG2edMnSNKJULBrJ/l5mylSo0Z1ypcvjw4tJuY8wnJzPkWSTpTNOZ8Sdl7MeZRVEEk6iAb16xOW/eFaJOlE+PjDtYQ1aFAfHZn69WMJy/5wLZJ0Osv+cC1hDerXR4evfv1YwrI/XIsknQjZH6wlrEH9+qhsGsTGEvbxh+uQpBNh59dfsWnjRxQ599yq1KhenbIKIkkHEd/sQsLWrlyGJJ0Ia1cuI6zZRReiI9PsoosIW7fqHSTpdLZ25TLC4ptdiA5fs4suImzdqneQpBNh7cplhMU3uwiVzUUXXcgZZ5SnyLpV71CwayeSdLyteXcZYfHNmnE4gkjSQbS4/HLCVixZQGHhPiTpeNqet4V/Zi6lyBlnnEGLyxPRkWkS15iY886jyMas99mYtR5JOh0VFu5jxZIFhLW4/HJ0+JrENSbmvPMosjHrfTZmrUeSjrfMJQsIa9HiclQ2wWCQpJYtCFu+eD6SdLxlLn6TsJYtLudwBJGkg4iJOY/LEy+jyM6vd/DW3JlI0vH01uszCbsu5RrOOOMMdOSuvfYawt5+/WUk6XT01tyZ7Px6B0UuT7yMmJjz0JG59tprCHv79ZeRpOPp3X8s5PPPPqZIg/qxXHRhU1R2KddcQ9iiN2YhScfT5pxPeWfRPMJSrrmKwxFEkg6hU8cOhL0+YzKSdLzs+PIL3pg5mbBOHTugH6bTDR0IS5/zIjmfZiNJp5vXZ0wmrFPHDujIdbqhA2Hpc14k59NsJOl4eX3Gc4R16tQRHZ727a6nRo3qFPlw7SqWL34TSTpeXp/xHGEdb2hP1apVORxBJOkQrm9zHY0bNaLI5pxPmPnceCTpeJgxaSy7dn5NkaSWLbj00uboh4mNrUeH9u0ImzHpKSTpdDLzufFszvmEIo0bNeL6NtehIxcbW48O7dsRNmPSU0jS8fDmK9P4cO0qitSoUZ2ePbqhw/fTnjcSNvPZcezbtw9JOtbWrXqHha+9RFjPG3twuIJIUhn063szYa9M/Rv/zFyKJB1LSxfMYeHcGYT1veVmdHT0veUmAoEARVYsTWfey88jSaeDf2Yu5ZWpfyOsX9+b0Q/X95abCAQCFFmxNJ15Lz+PJB1L2R+u5fnxjxLWr+9/EwwG0eHr3eu/aFA/liKff/Yxzz31CJJ0LBXs/JrJ4/9E2I09utHogoYcrnK//QaSdAj16tYlNzeXdevfp8iaFf+gaUILKlcNIUlH2/r3lvPkiMGE3XxTHzre0B4dHVUqV+asSmexdGkGRVav+Ae16sRS8/x6SNKp6tPsD3nyD/exe3cBRX7c6QZ+1qcX+uGqVK7MWZXOYunSDIqsXvEPatWJpeb59ZCkoy1/62ae/MNgdny5nSLJra/g7jtT0ZGrVasWr86ZS5GPN6yjfPkzaNgkHkk6Fp4a9Rs2rF1Fkdq1z+cPDw6nfPlyHK5yv/0GklQGrZJa8vaixWzdupXduwt4b/liGsbFUzVUHUk6Wta8m8GTIwazd88eirS4PJHfDLkPHV3NLrqIjRs38uGHGyiyfNF8Yn5Ul1p1YpGkU032B2v5y4j7+CI/jyKNGzfi4d8/SDAYREdHs4suYuPGjXz44QaKLF80n5gf1aVWnVgk6WjZnPMpT464j00bsygSc955jPj9g5x11lnoyNU+/3y
CgQDvLM+kyLpV71C+/Bk0bBKPJB0t+/bt46mRQ1mZ8RZhDw3/HXVqn8+RKPfbbyBJZRAMBmmecAkLFi5kx46v2fX1DpYumEO16Bh+VLcBkvRDLXj1BSb86f/Yu2cPRS5o2ICH//AQFStWREff1Vclk7niXTZtyqFI5pI3KVe+PA2bXIwknSr+kf4aT44YzNdffUGR6OgajPzDQ4RC56Kj6+qrkslc8S6bNuVQJHPJm5QrX56GTS5Gkn6oVcsW8eSIwWzO+ZQiUVFRjPzDg8TG1kM/XPOES9iyZStr162jyLpV77B921YubN6SQCCAJP0Qn3z0AU+OGMzalcsIGzr4Xq6+KpkjVe6330CSyqhq1SpcdumlLP3HP/jyy6/Yt28vK5ams+Xzz/hR3QZUOrsyknS4PvnoA54dO4o3Zk4mLK5xY0aOeIjq1auhYyMQCJByzdX8c80aPvtsE0XWrVrOB2vepcZ5tQjVOA9JilSbcz5l6tOPM+v5p9m3by9FatWqycN/eIgG9WPR0RcIBEi55mr+uWYNn322iSLrVi3ngzXvUuO8WoRqnIckHa4v8vN4YeJfmPbX0RTs2kmRc845m4f/8BAJCZego6f1la3YsmUra9eto8jHG9bxztvzqFw1RM3a9ZCkI/HqtL8z7pH7yc/bTNh9vx7EjzvdwA8RKPwGknSYPv88l98Ne5Bl7yynuCuu60iLq9rRsEk8knQoa97NYMmbr5Cx8HWKu+bqZH4zZDBnnXUmOj6GPfh7Zr48m+Iuvrw1Sde0Jz7xSiQpUnywZiVLF7zK26+/THGXXdqc3wy5j/POi0bH3rAHf8/Ml2dT3MWXtybpmvbEJ16JJB1K9odr+ceC11gw5wX27d1LWFzjxgwd8msaNmiAjo3xE/7K2HETKK5BXDNaXXsDzZOuIarimUjSwWzO+ZSMha+zcO5LbM/bQljVqlUYct+vaX3lFfxQgcJvIElHaOy4CYyf8FdKO7daNA2axPOjuvUJVT+PimeeBYEAkk5fhYWF7NzxFXmbc/jkow/5YM0KvtyeT3HlypXjzjv+P3r9tCc6/l6a8TKjxzzBF19+SXFnnlWJCy68hPPrXUD182pyVqVzCASDSNIJV1jIrp1fk7flcz7N3sCHa1aybWsupfXr+9/c1r8vOr5emvEyo8c8wRdffklxZ55ViQsuvITz611A9fNqclalcwgEg0g6jRVCwa6v2bZ1M59tzGLD2lVszvmU0v6rZw9+8T93oWNv8ZKlPD76z3y4IYvSGje7lDoNGhMd8yPOrlyVYLlySDq9/Wt3Adu3bSXnk2yy1q/mk4/ep7RrU67mnrvvJDo6mqMhUPgNJOkH2JCVxd+fmcQrr85Bko5U15905uaf9SEm5jx04nzxxRf89e8Tmfz8VPbs2YMkRbL27dpy0896UT82Fp0YX3zxBX/9+0QmPz+VPXv2IElH4uqrkvlZn15cdGFTdHylTXqOZyc/z+bNW5CkI3FxfDN69/ovrkpuzdEUKPwGknQUfPLJp7wy5zXS0xey/v0PkKRDuejCplx1VTId2l1P9erV0cnjy6++YvYrrzL/zXQyM1cgSZGi0QUNSU5uTfu213P++T9CJ4cvv/qK2a+8yvw308nMXIEkHUq9unVJbn0lbdteR8MGDdCJ9cqrrzFv/nwWL1nKv/61B0k6mBrVq3PlFa1o0+ZaLm2ewLEQKPwGknSUbd68hbXr1vHxxxvZsmUrO3ftpLCwEEmnr0AgwFlnnUWN6tWpW7cOTZvEUbVqVXTy+2rHDlavXsNHH2WTu3kzO3bsYN++fUjSiRYIBDiz4plUr16NOnVqE9e4MTVqVEcnt6927GD16jV89FE2uZs3s2PHDvbt24ek01mAihWjqFYtRO3zaxPXuBE1a8agk8++fft4771/8mFWFjk5n/Pll1+yd+9eJJ3eKlSowLlVq/KjH9XiggsaUj82lmMtUPgNJEmSJEmSJEmSpAgQRJIkSZIkSZI
kSYoQQSRJkiRJkiRJkqQIEUSSJEmSJEmSJEmKEEEkSZIkSZIkSZKkCBFEkiRJkiRJkiRJihBBJEmSJEmSJEmSpAgRRJIkSZIkSZIkSYoQQSRJkiRJkiRJkqQIEUSSJEmSJEmSJEmKEEEkSZIkSZIkSZKkCBFEkiRJkiRJkiRJihBBJEmSJEmSJEmSpAgRRJIkSZIkSZIkSYoQQSRJkiRJkiRJkqQIEUSSJEmSJEmSJEmKEEEkSZIkSZIkSZKkCBFEkiRJkiRJkiRJihBBJEmSJEmSJEmSpAgRRJIkSZIkSZIkSYoQQSRJkiRJkiRJkqQIEUSSJEmSJEmSJEmKEEEkSZIkSZIkSZKkCBFEkiRJkiRJkiRJihBBJEmSJEmSJEmSpAhRHkk6Rvbt28fGjZ+wZetWdu7cCYVI0ikrEAhw1llnUr1GdWqffz7SgWzesoXPcz7nyy+/Yu/evUjSqeyMCmdwbtWq1KpVi7PProQknU42b9nC5zmf8+WXX7F3714k6VR2RoUzOLdqVWrVqsXZZ1fieCiPJB1F7/1zNekL32LZO8tZvXoNhYWFSNLp5owzzuDCpk1JTLyUq5Jbc0HDBuj0lJ+fz7z5C1iy9B+sXLWKbdvykaTTUZ3atUlIuJgrWiVxVXJrJOlUk5+fz7z5C1iy9B+sXLWKbdvykaTTUZ3atUlIuJgrWiVxVXJrjpVA4TeQpB9o9itzmDr9Bf75z9VIkkq69NLm9OjWlWuuTkanh/Xvf8Dk56fw8qxXkCSVVL16NX7SpTO9ftqTs848E0mKZOvf/4DJz0/h5VmvIEkqqXr1avykS2d6/bQnZ515JkdToPAbSNIRemd5Jk/8ZSyr3vsnpZ111lnUq1eXGtWrc9ZZZxIggCSdqgoLC9nx9Q5yczezIesjdu/eTWktLk/kjtt/TlzjRujUVFCwm9F/foLnp0zjQOrWqUPNmjFUrlyZ8uXKIUmnsoLdu9m2bRsbN37C5i1bKK1KlSr8/NZ+dOvaBUmKNAUFuxn95yd4fso0DqRunTrUrBlD5cqVKV+uHJJ0KivYvZtt27axceMnbN6yhdKqVKnCz2/tR7euXThaAoXfQJKOwFPjn2bc+KcprkaNGlybcjUtW1xOXONGSNLpatV7/2Tx4qW8Pm8+X3zxBcX94n/u4r969kCnlmXvLOf3I0ayceMnFJfc+kpaX9mK5gmXUKlSJSTpdLQpJ4eMjHd4M30hq1evobirr0pm8L2DqFy5MpIUCZa9s5zfjxjJxo2fUFxy6ytpfWUrmidcQqVKlZCk09GmnBwyMt7hzfSFrF69huKuviqZwfcOonLlyvxQgcJvIEmH6Te/fYA5r80lrHq1avz0pzdyQ/t2SJL+Y9++fbzw4gzSnp3M119/TViP7l0Z+L/3oFPDy7Ne4YHhD1HcDe3b0b3bT6hZMwZJ0n8sz1zB5CnTePfdlYTVq1eXBx/4Pxo0qI8kncxenvUKDwx/iOJuaN+O7t1+Qs2aMUiS/mN55gomT5nGu++uJKxevbo8+MD/0aBBfX6IQOE3kKTD8Kt7h/DmgnTC2re7np/f2p+KFaOQJB1YXt42/jJ2HOkL3yKsc6eO3HfvIBTZXprxMg/+fgRhdWqfzx23/5xLLo5HkvT9XprxMk88+RRhNWpU59FHRtKgQX0k6WT00oyXefD3IwirU/t87rj951xycTySpO/30oyXeeLJpwirUaM6jz4ykgYN6nOkyv32G0hSGQ1/6A/Mee11wv6/2/rz3zf1oXz58kiSvt+ZZ55J6yuvoMjKVe9RZN369ewuKODyxMtQZFr41tsMvf93hLVscTnDfnc/tc8/H0nSwcU1bsQlF8fzzvJMdu7cyddff807mZm0bXMdUVFRSNLJZOFbbzP0/t8R1rLF5Qz73f3UPv98JEkHF9e4EZdcHM87yzPZuXMnX3/9Ne9kZtK2zXVERUVxJMr99htIUhk89/wU/vb3iYTdc/eddLyhPZKksouPb0Z
UVBSZK96lyLsrV1Gn9vk0bNAARZbc3Fx+MWAQu3YVUOSKVkncP/Q+zjjjDCRJZRMdHU3z5gksWbKUnTt3kp+/nY0bP6HNdSlI0skiNzeXXwwYxK5dBRS5olUS9w+9jzPOOANJUtlER0fTvHkCS5YsZefOneTnb2fjxk9oc10KRyKIJJXBR9nZ/PFPjxN2y3//jHZt2yBJOnw9unel20+6EPbInx7niy++QJHlT4+NJj9/O0XiGjdi8L2DkCQdvth6dbn3VwMJe3NBOtOmv4gknSz+9Nho8vO3UySucSMG3zsISdLhi61Xl3t/NZCwNxekM236ixyJIJJUBk+Nm0DYFa2S6NmjO5KkI3dr/1u46MKmFMnPz+epcU+jyLEgfSFvzHuTsLvvuoNgMIgk6chcdNGF3P7zWwl78qnxfL1zJ5J0oi1IX8gb894k7O677iAYDCJJOjIXXXQht//8VsKefGo8X+/cyeEKIkmHsOq9f/L6G/MJ69f3ZiRJP1zfW24m7Pmp0/jss89QZEib9BxhN9/Uh/qxsUiSfpjOP+7IxRfHU2T79u1MenYyknSipU16jrCbb+pD/dhYJEk/TOcfd+Tii+Mpsn37diY9O5nDFUSSDuHFl2YQ1qN7V2rVrIkk6Ydr2iSO665NIezFl15GJ793lmfy7spVFKlWLcRPe/ZAknR09OzRjbAXXnwJSTqR3lmeybsrV1GkWrUQP+3ZA0nS0dGzRzfCXnjxJQ5XEEk6iN27dzPntdcJ63hDeyRJR0/HG9oTNmfuXHTymzv3DcJuaN8OSdLR0zzhEpo2bUKRLVu2siB9IZJ0osyd+wZhN7RvhyTp6GmecAlNmzahyJYtW1mQvpDDEUSSDmLR4iX861//osjliZdxXnQ0kqSjJ65xIxo2qE+RnJzPee+9f6KT21tvLyLs6quTkSQdXVcntybs7UWLkaQT5a23FxF29dXJSJKOrquTWxP29qLFHI4gknQQK1asJOyyS5sjSTr6Lru0OWEr3l2JTl4bsrLYvGULRRrUj6VWzZpIko6uxMRLCcvMfBdJOhE2ZGWxecsWijSoH0utmjWRJB1diYmXEpaZ+S6HI4gkHcT6998nrEmTOCRJR1+TJnGErX//fXTyev/9DwiLi2uMJOnoqxkTQ43q1Sny8caNfPXVDiTpeHv//Q8Ii4trjCTp6KsZE0ON6tUp8vHGjXz11Q7KKogkHcQnn35KWJ06tZEkHX11atcm7JNPP0Mnr08//YywOrVrI0k6NmrXPp+wzz77DEk63j799DPC6tSujSTp2Khd+3zCPvvsM8oqiCQdxLZt+RQ5++yziapQAUnS0RcKnUvYtm3b0MlrW34+YaHQuUiSjo1zzz2XsG35+UjS8bYtP5+wUOhcJEnHxrnnnkvYtvx8yiqIJB3E7t27KVKhQgUkScdGVFQUYbt3/wudvHbv3k1YhQoVkCQdG1EVKhD2r93/QpKOt927dxNWoUIFJEnHRlSFCoT9a/e/KKsgkiRJkiRJkiRJUoQIIkmSJEmSJEmSJEWIIJIkSZIkSZIkSVKECCJJkiRJkiRJkiRFiCCSJEmSJEmSJElShAgiSZIkSZIkSZIkRYggkiRJkiRJkiRJUoQIIkmSJEmSJEmSJEWIIJIkSZIkSZIkSVKECCJJkiRJkiRJkiRFiCCSJEmSJEmSJElShAgiSZIkSZIkSZIkRYggkiRJkiRJkiRJUoQIIkmSJEmSJEmSJEWIIJIkSZIkSZIkSVKECCJJkiRJkiRJkiRFiCCSJEmSJEmSJElShAgiSZIkSZIkSZIkRYggkiRJkiRJkiRJUoQIIkmSJEmSJEmSJEWIIJIkSZIkSZIkSVKECCJJkiRJkiRJkiRFiCCSJEmSJEmSJElShAgiSZIkSZIkSZIkRYggkiRJkiRJkiRJUoQIIkmSJEmSJEmSJEWIIJIkSZIkSZIkSVKECCJJkiRJkiRJkiRFiPKcavbks2bhbOa9Mo/pyxaxev4
acvi3hkmkJDQnqX0HenfqQJPqSJKOpT3bWb94Lumvp/Pyin+wduF6cvlW3cuSiY9PpM3Vnbi+TTzRURxCBqNCbRlOWUXTqHUcMaFGJF7Xhh7t2tCoGt8r4+EQbR+imE6Mfe9vdK/FIWU8HKLtQxTTibHv/Y3utZAkqaSCXFbOfY2Zb84l4/11rF24nlzC6pJ4dV0aN+tI8nXJtLm8EVWiOKSMh0O0fYgyi26STNx5VWiU2IY23TvR5oIqfJ/caTcTd+tMihv8Wh4DLuOQcqfdTNytMylu8Gt5DLgMSZKkE2r7y3cTe9NEDiT6Fy/y7tBkotDJJZept8Rx20sU04mx7/2N7rXQaag8p5Cc+aNIvW0g0z/gwD5YzLwPFjNvyhiGE0PKL0Yx+ne9aHI2kqSjajvrp41k0OAxpOdyQNnL0slels7MCaMguhHdU0cw7PZkostzlOSyfmEu60kn/aVxjKIufR6fzMO9GxGFJEnH2Z5c0p/4Jf9z/0yy+T7ZZLyZTcab6Ux8HIhuRPfUEQy7PZno8hw1uWvSyV0D6W/OZNzDd1O392NM/n0fGlVCkiTpNJDL3GkT+T65aQtZOiiZ5CgkncSCnBJ2kfnHG0hIGcj0DyijHOb9sTdN2wxn8VdIko6W7RmM6tyClreOIT2Xssldz9T7u5DcbRQZ2zlGspl414+5aexKCpAk6fgpWDWOm1vG0eX+mWRzGHLXM/X+LiR3G076Vo6Z7LS7+fGt41hZgCRJ0qlvw2tMeYnvlzuKl+dvR9LJLcgpYNdbw+j3v7PJ4QgsGULXOyaRhSTpB9uazvAebRm+MJcjkbtwOG17jCJjB8dILnN//UtGZxYgSdLxULBsFD++ahAzN3DEcheOostPR5Gxg2Mm99VB/PLPKylAkiTp1Jb91lTmcnDjZqWzHUknsyARL4u0B4eTSUkxnQaTtngD23YWUlhYSGFhITs3rWbWgx2IoaScZwYwfv4uJEk/RDZTB3Zh1DJKiSb59seYsyKLTXl55OXlkZe7ibVLJ/NA57rsZ9lwhoxdSQFlMZg5eXnk5eWRl5dHXl4eeXl55OVuImvFHB67PZloSstg+ITX2I4kScfYx1O5q89wMigtmuSbRvC3BZlkbcojLy+PvLw88jauZcnzI+jTOpr9LBvOL/+cQQFlM/i1PPLy8sjLyyMvL4+8vDzy8vLYlJXJnMdTSY5mPxkPjOO1rUiSJJ3CVvLy+HRKuGswg5tRUtpc0rci6SQWJNJ9tJjZr1BSjzQWzRhGr5axVK3IdyrGNKHDvbNYNLYDJeUwfMo88pEkHancab/htpcoKTqZwbOX8uLwPiTWqUIU/1Y+iugL2pD69BKW/Kk70ZSU8cA4XtvKkSsfRZU6ifQZPpln7k1kP2kL+cd2JEk6hrKZOPA2puZSUnR3Hnt7KS/+qT+dmtWlShT/USmaRtf157GXlvLiLxIpbeVDz/DaVn6QqCp1Sez9AJMnDiaR0iayMGM7kiRJp6xVC5myihJSr7mTNv8VT0kTmfpmLpJOXkEiXU4W0ympQ/skYvl+sT8bwABKeWIRaziE/CxmPz2Q/ilNqRkIEAgECNRsyrU33smoZxeTs4eDWvxAgEAgQCAQIBAIELhxEjlA1ivD6Z/SlJqBAIGaTbm273CmL57CnYEAgUCAQCBAIBAg0Hc6+RzMLubdV5NAIEAgECAQCBAI9Gf6Fg5sTw6Lnx3FnTe2on4gQCAQIBCoT6vr+zPw6dlk5XNwS4YTCAQIBAIEAgECgW5M+gT4aDbD+15L05oBAoGaNE3pz/AX1pBPSfkfzGPSw3fS7fpW1A8ECAQCBAIBAoGaNE25lm53DGH8K2vI34Okk11BOk8NnklJ0fT/098Y0LIK3y+KRjc9zON9oylpIhNfzeaHiyLx7sGkUtoMVm5AkqRjZvurYxj2OqV0YuxrY+nTpAoHV4XkoWN57DpKmcjUN3M5GqIuu5PBd7GfGWuykSRJOjUVkP7i46y
kmOgBtEmKIv7KHsRT0szJr5FN2W3fkM7Uxwdxc9e2JIRChEIhQqEQoVAcLTt34eaBw5n4+nq276HMcpdNZczAm+lyfQKhUIhQKETosrZ0uWUQY6ZlkFtAmeUum8qYgTfT9rIQoVCIUChEwvVduPv+iczdsJ1DyXg4RCgUIhQKEQqFCN0ylVy+lbtsKmMG3kzby0KEQiFCoQTa3jKIMS+vZPseymZ7NnPHDuLm6xMIhUKEQnG07Hw3o6ZlkLsHaT9BIl3FiiRQ0uyFmeRzEBWbk3I7pawh6xO+xy7WPDuQa5vU54a+oxg/fw05/FvOGuZNGcPAXq2o2aQbo97K53BkPdubVh2GMH7+GnL4Rs4a5j09hG73b6XVvTGU8PRs5m3h++1axLynU+A/JwAAIABJREFUcyjhlg6kVGc/+W+NoluTmrTqNZAxUxaTRVgWi+eOZ1TfG6jf5FoGPruGXRyGjybRO+kGhjw9jzU5fCOHNfPHM6TrnUz7gG/tyWL6/15LkwuupfegMUyfu5gsisthzfx5TH9iOP07NOXcJt0Ys2IXkk5eBYvnMiqXkq4bQmq7KhxaFdrcfBfJlyXT564HeOzvc1iyNou/967LUREVx8WdKSUX9iJJ0jGSy9zJ48ilpDZ/+h3d61BGdelxRyp1L+tE/1+OYOzzc1iyNoux3aI5OqKIi+9Eabl7CpAkSTolFSxlYVouxUX3bk2LKKBZa3o0o6TXp7JwA4e2J5uZg7vQ4rIu3Hb/OGa+mUE2xeWyfmE6MyeM4u4bWxLb8mbGrSrgoLamM7xzHHHX38bQCTNJX5bNdzZkkP7SOIbe2pa4i7sw/M3tHNTWDMbckkDc9bcxdMJMMjbwnexl6Ux8/G56XtaCLoOnsn4Hh2Xn9pWMuyWBuOtvY+iEmWRs4N+yyXhpHENvuprYDoOY+zEHlf3qULq0SKDnr8cxc1k238pl/cKJDL+1LXEt72bi+/lIxQWJdHEJpFDK06l0+99JZG7he1Slw58LKSwspLCwkMLCQgoLp9HrfA5gF4sfuoGUXqOYl8PBfTCdga2b0PvZLMrk/ZGk9ppEDvtLaN+CXj++kxiKG0/a3By+z66FsxmeQwkDftqBqpSUNaU/zVsPZPoHHFzOPEb1akrKA4vZRVlkMfKu3kzKYX+XdKB5Q76Rw/S+rej2x3nkUEYfTOfO9v2Y9BGSTlIrF4+htOSOralLGTVL5cXXXuSx/0ulT8dEGkVXIQpJkiLUZ+nMfIlS+tDnhrocjqirHyDztb8x4t7+dL8ukUbRVYhCkiRJR6Jg8VxG5VJMND+7ugVRFImn9X/FU1I64+as5OBymXlXW25+Ip1cymjDTAb1uIupH3NgH0/ltiu6MGphLoeUm86orjcyalkBB/TxTO5u25ahL2VzcLmkP3EbLX8yiowdlM321xje42oGvZTNQS0bR8+BE8nmwLKn3UbbXmNIz+X7bZjI3Z3u4qkNSN8JEukqtqLb7xIoKYd5f+xN8xo1afrjOxny9HQWr81h1x4OW/7MgXS9bx45lFUOk3r1ZtQKDm1FJpkcSAK9r0mAlh0YeAklTH9mFlkcyC4WzR1FCTGD6dC6IiWsGEXvG8eTRdkt/k1XBs7M59AyyVzBASXclEICsGv+aFKfyeE/kkidsIgNXxZSWFhIYWEhO7dt4I0HOxBDMTmT6P276eQj6eSTzboMSommdbO6nBQK1vLuS5QST8UoJEk6JgrWv8tMSumYTHw1TiIFrF05k9Liz45CkiTp1LOd16aNoYToW7m+dRRh8Vf2IJ6SVj63kJV8v4KFT/HLybn8RyL9H59D5sY88vLyyMvLY1NWJi8ObUM0xeRO5baHZ7KdUgpWMqr/bUzN5TBkMLzPUOZupZSVjOl/MxM3UHbLhvOz++eynTJ4cypTl1E2rw9j4sIC9rNhIoNunUouZZCbQcYqpO8EiXgVSfrFGIa15AByWDNzDMP7dqNVk5qceUaA+kn9Gfj4JOa
tzeeQdi1mzG/GkENxMXR48A02fFlIYWEhhV9u4I0HOxBDcYsZ+Nh08jlCl/Qm5RK+kUDXuztQwiuTmPcB+9u1iNkPU0LC/3YjpSLF5DDpwYEspqSku9JYvnknhYWFFO7cxvJJqSRRXA5jfjOeTI5UAr2vSaDImiXjyaGYn9zJgFuSiD2b71SsGkvKvdNIuzeJlB6pDJ4wjTcyN7Dtz12piqSTTy7Zb1JKC+qex0mggIzHhjOG0lrTrBGSJB0TOZ9/zH6a1aUuJ4+CZaMZ/jj7ad2kMZIkSaecrenMTaOE6H7JJFJMs470v5qSVk1h4Sq+17plz5BLMR1vJbV3InUr8Z2oKnVJ/sXfGfuLRJI792fA43/jxQWZZI3sRBVKyp46nOHLKOmy/oxdkMWmvDzy8vLIWjCW/pdRUu44Rj6/kuJyp41i6DJKuqw/YxdksSkvj7y8TWQtGEv/yyghd8JwJq6i7C7rz9gFWWzKyyMvbxNrXxpMm2hKyeWZZesoqYD0tGHMpZToZAb8fQlZuXnk5eWx6f03GXt7MtFIJQU5FZydxOBnZ5HakkPKWjKeUXf35tom51KzWW+Gv7CGfA5s18JpDFlBCUmPzGbWvSnEns23zo4l5d5pTB+aQAlPj2faB5RJ0l1pLN9cSGFhIYWFhRRmDiCBb8W27koHipvHmJmZlJY/dzqjKC6B3m0SKOGDWYyfQgkxt89i9mO9SKhekf+nYlUSfjqatL93pYQVI5k2fxdl0jKVtMxtFBYWUlhYSGHhcgZcwv+za08OJWStJiufA6hIyoOLeOP50Qy7pSspl8RStSKSTkafZbOek8yO7eS+P5dxd/2Ytg9lUFr0L9rQIgpJko6J3I9nclLaU8D23PXMHXs3P75+OBmUEj2ANklRSJIknWqyX53IRIqL5652iZRUl9ZdkilpJY+/msH3KdiTSwnZ68jezgFEkTx0Di8+PYLBvTuR3KwuVaIoZSUvj51LCdHdGTtuBN2bVSGKb1Vp1p0Rf3mMNpSU8dxCVhKWzWtpMykhuj+Tnx1B92ZViKJIFFWadWfEuLF0oriVPP5iOgWUQXQqL74wgu7NqhBFkSiiWw9g7Mg+lJb76edsp5iCpcz9Yy4lJTJ44mQGd2xElfL8P1HV4uk+fDLP3JuIVFyQU0W9DoxeuIk3HkulQ0PKJOe9SQzp2pQmPx5F5lfsJ3PhKErqx8CfJbC/iiTdlEoKxc1mXmYOh3TJMEaN6EVCdQ6sYTf63UIJmX+fRybF5bPolTGU0CaVrpdQQtbi2cyjuAQG3taBquwvtkcqAyguh/FL1nBoCQz740h6XVKVA6l4dgIlrBjOtU2a0nvQeKavyCJ/F5J0GIbTNhQiFAoRCoUIhUKEQiFCtWOJa9GTQWkZ7Ce6Ow/3SyYKSZJOEp9N5eZQiFAoRCgUIhQKEQqFCIVChEIhQqEQoVCIUChE6Jap5HJow68PEQqFCIVChEIhQqEQoeiaxMa1pOevJ5JBadF0/8OtJEchSZJ0islm4UtzKaFZD1o3Yz91r+xOMiXljn+N9AIOKOrseEpYNYouLVpy2/0Tmbkqm+0FlN2qhUxZRQnR/W6lex32V781PXomktw7lQce/xsvLsgka3Yq8fzbxxnMfZMS4lP70KYa+6vTiX53UUJuWgbrOLTk+/qTXIn9VIlPphOlbN1OAcWszmAKpdw2kDsvi2J/UST2vZM+SP8R5FRSPoaUu0Yz6/2dbMqcxrihqXRtGcuh5MwcSIc7JpFFcVmsXkIpsdSszoE1bEoKJU1am8WhJNzUgaSKHERVUn6SSgkrRjJ7Cf+xZR7Tn6CEDj9NIZbidpG1ajolxVKzOgdWsSnNe1BCTuYacjiES3rToWVFvk9Cp1Q6UErOGiY93J9uCfU598wA9ZO6cedvxjN9RRa79iBJR1Eigyc+TqdaSJKkYhLvfYbHO0cjSZJ0yln1MuNep4T47q2J5wDqt6b71ZSUO4q5iws4kPi2/Wl
DKbnrmfr43dx8VQKxNUMkXH8zgx6ayMxV2RTs4Xtt/zSblZT042aNOLC6dH9iDi8+/gCpvTuR3KwuVSrxnYIN7zKTkurWiuHAooiL70QJuStZ/xmHlNioLgdUMYooDi7343XkUlL3pIuJ4ntUiye5I9J3gpySKhJzSVf6/W400xZvoPBfO9m0ZhHTJgwmtUcSsewv55neDH8hn//IIWcupQyhVSBAIBAgEAgQCAQIBAIEAgECgVYMoZRVWeRwcCkXNeFQqrbpyuAYislh9Ix57OJb+QtnM57i+tGvUywl5ZPzEaVMp3ftAIFAgEAgQCAQIBAIEAgECARq0nsKJU3JIotDaJNAEw6iYT9GP9+PWL5f1pLpjHmgP90S6nNm7ab0fmA6a/KRdLKqFk2d/789eIHPuiAUPv57nkGbGYc558O8sYaCgrJEmTJyU2ZTQyfJRVI4+knJz2sop9PMT8dBHQUkudT7Cug5BJYKmHIRm47LDGlTwCDA0eGmwhmZzAX/nEawAp7XlQ/yjNvAARv+vl+avkjOYCa9/jyFXRORJOlYSm9XwH7+soNamqBILoN/tpTnv59FIpIkSSefikUzqGBfmfS7OpMDSyfnG7nUN/GlMmo4gHYDGf3zgaRzcJXLi5k8Zgh3XNWFMy/uxt1jitlQw35qt79PfW1SW3M0arZtpr7ib19ISkoKKSkppKSkkJKSQkpKCikpKVz47WLiFVP5HocRISmRA4ukcQGHsauW+i44O8LBpRHJQNorzOdBiyTSLsym97dGMOH5xWz88xqm3ZdNfVOeLaGK4+u0VkkcVlIeN3wnjX1V/Xwhi3fysQ9YWDyFON/qSV4qJ0arU0ji0DL6TWbNppcZe2tH0jiMqrVM/2EfOnW8gXGrdiKpCUpMJjlCPcVUvscJFemYS+7VuQy8bzSTFqzjzReH07djaxpfDR+8jyRJe0XO7sB+Fq1mPU1ApAO5V+eSO2Awo382n3VvzmF4nw60pvFVVVciSZJ0Yi2jdGIF8SoYdlUKKSkppKSkkJKSQkpKCikpKaSkpNDlu2Xs58liyrZxQOm9HmPpqucY3qcDEQ6jegMzR91Btyv6M3F1Lc3bFaS34ThK5JQvIe0Vpjl7dzp9QiFCoRChUIhQKETogYXs5DCSO3LbYxMZewnxZqxhE8dTHmmpNEh2wf10YR9VIykp3wkfLGbhz4lTeGtPkjkx8tLSaIikL/ekcPoatuzYworZkym6pycd0zi4qhLu//pQFv4FSU3OBWT1Zz/layppsJpSht0yjKnLq6nlSBQxPwgIgoAgCAiCgCAICIKAda/PYc7sOTz20CD6do2QyKFF2hYQr5jK92iAWmq2UU8m6WchSfq86ng5g6hndSkVGzmws/ryVBAQBAFBEBAEAUEQECwo4mgVLQgIgoAgCAiCgCAICIKAYN1S5syew5zxwxnUJ4tIIocUObsD9VVUVtMQtX+pIF4B6WchSZJ03NSWL+Bn1TSCqZQuqeFgEtvmM/hnS1m3ZR2Lnn6Mwjvz6RDh4KpLGdZvBGXb0ZHYhbRXmObsnAwuTSPeM0tYSUOcyZntqWctm97lE0kkXUK8m6exMRolGo0SjUaJRqNEo1Gi0SjRaJRoNEo0GiUajRKNRok+fxtpHEoypyTRMJf0ZnA+caa9upgPli1kIvtIK6JnThL7SyI5lXqKWByNEo1GiUajRKNRotEo0WiUaDRKNBolGo0SjUaJRqNEo0Vkc2jJXzqFI5KURpeb72LE4y+zZkuUHVvWsHj2ZIru6UkG9VSNY8rcKiQ1NYl8JWsg9ZW9VE4lDVNTXszEVyYy5NoLObNrfx6YVMqGbRxXp7RgPzt30QA7YDeSJH2q9eXkDKCeMibPWkYtzUyLJPazawcNsgtJkqQTqJY3XhlHNY1j6tPFVHIYiREybxxI0djnWLouYMu6pcx/+jEK78wnnXqqJzL1lWpiEk9tQ30VldUcTPWLQ+jW6w4eGDWV4vIKKmt
qiUk8tQ31FZYEBEFAEAQEQUAQBARBQBAEBEFAEAQEQUAQBARBQGFXjqnEU9tQX8U7lRxcJZVrkfYK06x1pMvNxKuawLTiDzi8LWx5i3oySEvlEx3pkk+8F1awaScnSAZ5t/ZkX1U/L2FE8Tj21eV7fchL4gCS6dglj3hLWPM2TUpSWkeyb76LEY+/zJoFhaQRb/q6TUhqelrnFDAoQrxXRjBxXg2HtX0ZE8ZMZa+NpUz+QX9ueryMWo6fSHom9T1TWkYth7GtgrKXiHdjOhEkSZ9frbn2lkIixKsYNZQJy2tpVs5Kp4B4xbPKqeRwKvjtPOrpQPpZSJIkHR/bFjBzPI3nlWLKN3JEEiMdyLpxIEVjn2Pp7MFEiDdzQyUxrTtkkku84op11HIgtaxbMZUN5cVMHjOEO3pdTZeMMxm2qJY6rTtkkku8ZRsqaUpan51OJvGKX6ughoPYVkHZS0h7hWnWksnrV0Qa+6pi4t2Dmf6/HNKmZ8YydhXxvt6RjCQ+kcSl2XcRbxzjfr6WEyWj4C7uYh9V4xg3nn10YUB+Fw4mI6snXdjXQoZOKuEDjp+dH1Sxdulspoy5n0HXdqfTAwvZyYElnXEmZxIvrUUSkpqg1vkM/Lcs4lUz+bt3MG5pDQdXSenwoYxbTT2Z3PeNXBI5jtp2oC/xqn86kgnLazm4WpY9OYGpxIt0TicdSdLnWWLOtxnRi3qWMXLg7UxcWcPh1GwsZdxPfsYJd1YHMiPEe2UEI2dVciiVsyYwfjXxel1AOpIkScdHzZJSphIv9/+uJAgCgiAgCAKCICAIAoIgIAgCgiAgCAKCYBNPDaCeUma+Vsm+amuq2bC8mKnjhzGk93V0+1EZtRxY4ulppBEv0iKRvdplkt+ZeOMnMmMj+3uvmCnjqWcQuV0S+Yd2meR3Jk7ZI5MpraHp6Hw5BRHiTZvA5OW17K+WZZPGMRXpU2GauaQefbi/G/GqpjMg+xoGjS9hbdVO9tq1k6p1JUy8+xq63z6dKuLd9e0+ZPCp5PzbKEwjTsl38rhhVAlrt7LXzv9dwsTb29Eprw/3/nAi0+euZNMHO2l0qXn0voeDyx9M70s4uEt6M/jrxKkacwM9h0xhybs7idm5dS2zH+xOu+xrGPTAOKa8sIS1VR+wk8/o7Sn0Oe1MOmX3YdAD45hSuoS1YwZww/ems7JqJ3vt2smmpVO49577WUm8AV07IqlpyrxzDEVdiVddxsieV3PHmJks21xDLZ+oraFy+VSG9bqO/pOWUV/kziIGdub4iuTS784I8ZYx8tpu3DG+lA3Vtey1q5aazcuY+oObuG7UMuJlct/1WUiSPu8i9H1oEn0jxKsuZdg1V/CN706m9K1qamrZq7amkoqXpjLsW13I6NqfkfOqOfEyyf+3LOJVM/Pb1/GNoqks21xDLTG11FRvoPSn/bnu2zOpJt7APvlEkCRJOh6qKZ01lXj59L0ynYZpTe51A6mvbMpLVPCJjVO5PeNCul17B0N+NJGpi5axYfzd9C+aSUV1LXvtqqVy+VQeKBxGBfH6dbmAT2Vy4935xCtlSM/+jCuvpHYX/1C7uZSR9wylmHiRf7+R3NZ8IpMb78onTvVE+vd7gKnLq6nlE7U1bHhpJNd1vY5v3DeMidOKWfZWNTW1HAdZ5P9bFvGWMXJgf0a+tIGaXfxD7bYKZv7gJq4bU4G0rxY0e10YPH4Es7KGsoR9VC1kypCFTBlCw3Qby+Cbk4nzpTwGj+7NuNtn86kqSh68gZIHOYBNrH11NhOpk83YlYspvIRGlExevyLSnhhJFfvreWseGRxKBgMeLGLi3JGs5FNLxg+i+/hBHMiUpQuZQp00Bv9qLRMKkjlq5w9g6LCJlAxfyaeqWPjTAVz6Uw6v2wgG5CQhqYlKzKRw8lNU9r6DqRvZRyXFo+6meBQN07WIZx7KpzXHW2vyh4yg70t3M7OafVRS/KP+FP+IBon0L6RfZyRJgrZ9GT+1kuqBIym
rZh/VlD39AGVPP8CRiZB7aQdac3xl3j6CoheuY+Ry9lFN2RNDKHtiCA3StYhB+a2RJEk6LjYuYMaLxPtaATntaLDW2fkMZCpT2cfqGZSvHkxmZ6BdP+7//mRKx1TwqWrKnribq5/g8LoW0S87kX2l9y2i6KlSRi7nU9WljOzVhZEcQqQvY+7KJZFPpfe5n8IppYxbzaeWT2bItZMZwgFsXEbZtInUidz5HG+Mzac1x1bm7UUM/n/fYGI1n6ouY9zt3RiHdGhhTgJJXYsoWVBENkep22BefraQLuwv41/HMutbGRyZNG6bPo3CS2h0ST1u4N40DuAu7irI4HCSrhzKxEfySOPIZD88m7EFyXw2SWQ/MIWxBWkcsbSejH2ikC5JSGrK2hbw2Pw5FOZEOCpdB/Hc5EKyTuXEaNuX8VOLyOLoRHKKeGZsAREkSfqnxK6FPFc8iUFd+WzaFTD8xTLm3JdJIsfZqVkUTp5E3whHp91AnppcSGYiktRgS4aHCIVChEIhQqEQoVCIUChEaPgSDmbJ8BChUIhQKEQoFCIUChEKhQgNX8KBVTH9lhChUIhQKEQoFCIUChEKhejzbBWSmq/K12ZSSrz8XjmkcwROzyV/APVUMH7eMv4pkawhjzH8+ghHLJLP8HH3kplIvMRMCidPom+EI5DOwEeLKDiLeKdmcf+4InIjHJmuRTzzUD6tOQ5OzWXo1CKyaIgIffsXIMWEOUkk549g4dppFPZIo+Ey6Pn9aawpnUDPL3MQGfR+cgWLR/cmgwZIy6Nw9mKm3ZrBsZFNz+91YT/f6kleKg2QRPZ/vMzC6YXkpdEAGfR+bAUlw7JJohF8qQuFsxcz69/zSKNh0noUMu3VWRRekoSkZuD0XIpmlTHnob50iNBA6RQ8NId1JaPJb8sJldi1kF+9MYnBOREaLp38+55i/qxCsk5FkqQ4ie37MrpkHXN+PJDcCEcgQofrBzH6+ZVsWv4Ug3MinDBt+zLp9fkM75VOw0Xo0Gc0i379GAVtkSRJOk4qeGlKGfEK6NcjnSPTmtzrBlJf9ZQFlNXyT6dmMvjp+Tx1Ty4RGiaSM5hJxU8zuHMiB9S2L5Nen0NhToTDiuRTVLKIx3qlcyCJXQt5rngSg3MiNER6r9EsmlFI1qkcN4ldC/nVb0ZT0I5DiJA/8nnG35WJFNOCk0jShbcxduFtDF23kJLSEhYWr2Tx6oWsreITaXTs0YmOF2aTl5NHXn4eHVNpgGSyvz+Ljd9aS0nxFGY/u5LFqxeytop/SLs4j05ZXbit3130ye9IcguOqS43DybvgUEs5FOFt/YkmYZKouOtY/n1zfez5IVpTHuhhBUrF7Lkbf7p/GzyulxKz5sHMODmbNKSaFwtMuj9k1/T+8G1lBRPo2TuEtauXcPC31fxT2l07NGJ7l17k9e7D727pZGEpGalRYTc+yax9J4xbFhUzIx5pSyrqKBseSUx6V1zyczMIv/qAq7NzySSSJOR2L4vw1/sy32ri1nwUjmlyzawfk0ZG6rZK9Ixlws7dCDra/n0uz6fDqcjSdLBtYiQe/dj5N45hsq1Cyh/qZzSZRtYv6aMDdV8Ip2sq9O5oHM+l2flkNMjk/RTaTpOz2Lwz1cycGMZpa+UUjZvNes3l7FsI59ql0Vu2wvofH0uN91YQNZZiUiSJB1Xq8uZsZp4vQrIPYsj1jqngEFMZTL7qH6G8t/eT25OIv/QIp2CkXMo+N4GSufNoPSVZWzYsI6ytdX8U4QOORdy+SU3kltwEwVdIyRyGKfnUvTiOr69fCYzfllMaUUFZcsr+Yd2WeR2/gr5N/ajX68sIi04pMT2fRn+YgH3LS9mxi+LKa2ooGx5Jf+UTtbVmXylRwH9+hSQdVYiJ0Ji50E8tfQmlr34DM88PYMF5RuoBtK75pKTP5C+ffLJbdcalpcixYSiH0OSDuKK7rnUSUlJYfozP0eSdGxcf0Mv6qSmpvLyr2ajpmnUo2OY82IxdR7
60VCuuDwLSVLje2z845TMm0+dcaN/zJVXdkefL0uGh+j+Q/b38GKiw7I5kCXDQ3T/Ift7eDHRYdnsr4rpt5zJgBnsp/f0Lcy6NQ19fo16dAxzXiymzkM/GsoVl2chSWp8j41/nJJ586kzbvSPufLK7jREGEmSJEmSJEmSJKmZCCNJkiRJkiRJkiQ1Ey2QJEmSJEmSmpDsYVGiwzgi2cOiRIdxBNK47fkotyFJkpqbMJIkSZIkSZIkSVIzEUaSJEmSJEmSJElqJsJIkiRJkiRJkiRJzUQYSZIkSZIkSZIkqZkII0mSJEmSJEmSJDUTYSRJkiRJkiRJkqRmIowkSZIkSZIkSZLUTISRJEmSJEmSJEmSmokwkiRJkiRJkiRJUjMRRpIkSZIkSZIkSWomwkiSJEmSJEmSJEnNRBhJkiRJkiRJkiSpmQgjSZIkSZIkSZIkNRNhJEmSJEmSJEmSpGYijCRJkiRJkiRJktRMhJEkSZIkSZIkSZKaiTCSJEmSJEmSJElSMxFGkiRJkiRJkiRJaibCSJIkSZIkSZIkSc1EGEmSJEmSJEmSJKmZCCNJkiRJkiRJkiQ1E2EkSZIkSZIkSZKkZiKMJEmSJEmSJEmS1EyEkSRJkiRJkiRJkpqJMJJ0CAkJCdTZvXs3kqRjY9fu3cQkJCSgpishIYGY3bt3I0k6Nnbt3k1MQkICknS8JSQkELN7924kScfGrt27iUlISKChwkjSIfxLq1bU+fDDD5EkHRsf1tQQ8y+tWqGmq1WrVsTU1HyIJOnY+PDDD4lp1epLSNLx1qpVK2Jqaj5EknRsfPjhh8S0avUlGiqMJB1C2plp1IlGo7z33hYkSY3vj+9tISYtrQ1qutLS2hDz3pYtSJKOjS1bqohpk9YGSTre0tLaEPPeli1Iko6NLVuqiGmT1oaGCiNJh3Beu3bErN/wFpKkxrfhrbeIOe+8dqjpOi8jg5i33nobSVLj2759O5WbN1PntNOSOSM1FUk63s7LyCDmrbfeRpLU+LZv307l5s3UOe20ZM5ITaWhwkjSIWR2voiYlaveRJLU+FatqiCm88UXoabr4osvomXLFtRZ9WYFO3fuRJLUuFasXEVMZufOSNKJcPHFF9GyZQvqrHqzgp07dyJJalwrVq4iJrNzZ45EGEk6hCsuv5yY1xcvZs+eKJKkxrNtW8Cy5b+jTsuWLbni8izUdIXDYbK7XUFMWfnrSJIaV/lri4npdsXlSNKJEA6Hye52BTFl5a8jSWpc5a8tJqbbFZdzJMJI0iGkpbXh8qyu1Nm+/a/MnTcfSVLjmTt/ATFfy+tBy5YtUdOW16MHMfPYgfhbAAAHWElEQVQWlCJJajxbtlRRVv4aMXk9rkKSTpS8Hj2ImbegFElS49mypYqy8teIyetxFUcijCQdRsGNPYmZ9cIcJEmN48OPPmL27DnEFNzYEzV9X7/+Ws44I5U6a9aspfy115EkNY6Zs14g5sYbvk5ycjKSdKJ8/fprOeOMVOqsWbOW8tdeR5LUOGbOeoGYG2/4OsnJyRyJMJJ0GNfmf40LOnSgznvvbeHpqdORJH12v3hqKn/dsYM62d2u4LLLLkXNw639byHmqWemsWfPHiRJn82qNyt4ee48Yvrf0g9JOtFu7X8LMU89M409e/YgSfpsVr1Zwctz5xHT/5Z+HKkwktQAd915BzHTn32O5b9bgSTp6P164SJK5s4j5s5v3YGajwG3fZPz2mVQ5913/8iEx/8LSdLR27FjB48/8d/E3NKvDx3an48knWgDbvsm57XLoM677/6RCY//F5Kko7djxw4ef+K/ibmlXx86tD+fIxVGkhrgqtwcbiq4gZjRY3/Kpk3/iyTpyL1ZsZox435KzB23DySz88Woebnv3u8QUzJ3Ps89PxNJ0tF5dMxP2PyHd6lz7rnncO937kGSmor77v0OMSVz5/Pc8zORJB2dR8f8hM1/eJc65557Dvd+5x6ORsJ/fgxJaoDu2d14ffE
Stm3bRm1tLb9dtpyLL+rE6aefjiSpYVasWMlDwx9h165d1Lni8ix+OPRB1Pyce845hEMhfrdiJXVWvVlBy5YtufiiTkiSGmbPnj2MeORRlix9g5hRIx+m7bnnIElNxbnnnEM4FOJ3K1ZSZ9WbFbRs2ZKLL+qEJKlh9uzZw4hHHmXJ0jeIGTXyYdqeew5HI+E/P4YkNUA4HObSLpfwm/Jytm//K3/961955devktYmQkbGl5EkHVrxSyX8ePQ4du3aRZ3255/HmEdHkZSUhJqnS7tcwtat21i3fj11Vr1ZQfDnP5PV9TJCoRCSpIPbuGkTD48YxcpVbxIzrOg/uPqqXCSpqbm0yyVs3bqNdevXU2fVmxUEf/4zWV0vIxQKIUk6uI2bNvHwiFGsXPUmMcOK/oOrr8rlaCX858eQpAZKTm5N18su443f/paPPvoLe/bs4fXFS9lS9T4ZGem0atUKSVK8dzZuYvyEJ5g950ViLrzgAsaOHkVq6umoecu5sjtbt25j3fr11Hnr7Xf4TflrnHZaMult2yJJ2t+zz83gkVGj2bptGzEP/uABbiq4AUlqqnKu7M7WrdtYt349dd56+x1+U/4ap52WTHrbtkiS9vfsczN4ZNRotm7bRsyDP3iAmwpu4LMIRT+GJB2h99+v5uERj7D8dyvY1/XXXcvXrunBxRd1QpI+71asXEXpK7/m1UVl7KvH1bn8cGgRX/ziKejkMeXJXzBp8pPsq1Onjlx/bT65OV8lKSkJSfo8e2/LFhYtKuPlufPYti0gJjm5NUMf/AE5V34VSWoOpjz5CyZNfpJ9derUkeuvzSc356skJSUhSZ9n723ZwqJFZbw8dx7btgXEJCe3ZuiDPyDnyq/yWYWiH0OSjtKkyU8y5clfUN8ZqalcdFEnMr6czhmRM/jiKacQCoWQpJNVNBpl+/btvF/9JzZu3MTvf/8/fFBTw74SEhK49zv/h9tu7Y9OTkuWvsH4CY/zzsZN1HfJVzJp3/58zjrzTFq3/hcSEhKQpJPZ3/72N4Lgz2z+wx9Yt24972zcRH3X5F3Nd4fcSyQSQZKakyVL32D8hMd5Z+Mm6rvkK5m0b38+Z515Jq1b/wsJCQlI0snsb3/7G0HwZzb/4Q+sW7eedzZuor5r8q7mu0PuJRKJ0BhC0Y8hSZ/Bxk2bePqZ6cydNx9J0oH1vrkXd/zrQNLS2qCT37Tpv+TZ557nT3/aiiRpf1/J7MyA277JVbk5SFJzNm36L3n2uef505+2Ikna31cyOzPgtm9yVW4OjSkU/RiS1AjeffePzJ2/gLKycja89TaS9Hl38UWduOqqXHpefy2pqano82fuvAUsfPVVlix9g7//fReS9Hl2RmoqV361O/n513DZpV2QpJPJ3HkLWPjqqyxZ+gZ///suJOnz7IzUVK78anfy86/hsku7cCyEoh9DkhrZn/60lXXr17N58x/YunUbO3buIBqNIkknq1AoxBe/+EXOSE0lPb0tnTpeSHJyMlKdPXv28Pvf/w/vbNpEVdX7fPTRR+zevRtJOpl94Qtf4LTkZM4++yzatz+fdhkZSNLJbs+ePfz+9//DO5s2UVX1Ph999BG7d+9Gkk5mX/jCFzgtOZmzzz6L9u3Pp11GBsdaKPoxJEmSJEmSJEmSpGYgjCRJkiRJkiRJktRMhJEkSZIkSZIkSZKaiTCSJEmSJEmSJElSMxFGkiRJkiRJkiRJaibCSJIkSZIkSZIkSc1EGEmSJEmSJEmSJKmZCCNJkiRJkiRJkiQ1E2EkSZIkSZIkSZKkZiKMJEmSJEmSJEmS1EyEkSRJkiRJkiRJkpqJMJIkSZIkSZIkSVIz8f8BmmASmyZMfmQAAAAASUVORK5CYII=) + +### 2.1 可扩展性 + 
+TensorLayer3.0相比于TensorLayer2.0及之前的版本对后端进行了解耦。在之前的版本中我们设计的Layer直接使用了TensorFlow的算子,这为后续扩展后端带来不便。为此在新的版本中,我们将所有后端算子均封装在backend层,并且对不同框架之间的接口进行了统一,在构建Layer时均调用统一的接口来达到兼容多框架的目的。
+
+### 2.2 简易性
+
+TensorLayer3.0使用简单,我们设计了两种构建模型的方式,对于顺序连贯的模型我们提供了SequentialLayer来构建,对于复杂模型可以通过SubClass的方式继承Module来构建。在TensorLayer3.0中构建的网络模型可以当成Layer在__init__中初始化在forward中调用。TensorLayer3.0构建网络时可以无需计算上一层的输出(不用输入in_channels参数),通过最后init_build操作来完成参数初始化自动推断模型输出大小。
+
+### 2.3 兼容性
+
+TensorLayer3.0构建的模型能直接在TensorFlow, MindSpore, PaddlePaddle中使用,可以混合对应框架的算子进行使用。例如用TensorLayer搭建网络,使用TensorFlow后端,那么在数据处理和模型训练时可以直接用TensorFlow提供的算子完成。
+
+## 三、数据集加载
+
+TensorLayer内置了一些常见的数据集例如mnist, cifar10。这里加载手写数字识别数据集,用来模型训练和评估。
+
+```python
+import tensorlayer as tl
+X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
+```
+## 四、数据预处理
+
+TensorLayer提供了大量数据处理操作,也可以直接使用对应框架数据处理操作完成你的数据构建。
+
+Tensorlayer目前拥有完善的图像预处理操作。为了满足开发者习惯,集成以TensorFlow、MindSpore、PaddlePaddle为后端的图像算子。图像算子主要基于各框架本身tensor操作以及PIL、opencv库完成,并且能够自动根据全局后端环境变量将图像矩阵数据转换为后端框架对应的数据格式。为了图像算子在各框架后端保持一致,TensorLayer综合考虑TensorFlow、Mindspore、PaddlePaddle框架各自图像算子功能及参数,增加和调整不同后端框架源码扩展了图像处理功能。以PyTorch为后端的图像算子将在未来开发中更新。
+
+TensorLayer的图像数据预处理例子如下:
+
+```python
+import tensorlayer as tl
+import numpy as np
+image=(np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+transform=tl.vision.transforms.Resize(size=(100,100),interpolation='bilinear')
+image=transform(image)
+print(image.shape)
+#image shape:(100, 100, 3)
+image = (np.random.rand(224, 224, 3) * 255.).astype(np.uint8)
+transform=tl.vision.transforms.Pad(padding=10,padding_value=0,mode='constant')
+image = transform(image)
+print(image.shape)
+#image shape : (244, 244, 3)
+```
+## 五、模型构建
+
+### 5.1 SequentialLayer构建
+
+针对有顺序的线性网络结构,你可以通过SequentialLayer来快速构建模型,可以减少定义网络等代码编写,具体如下:我们构建一个多层感知机模型。
+
+```python
+import tensorlayer as tl
+from tensorlayer.layers import Dense, SequentialLayer
+layer_list = []
+layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=784, name='Dense1')) 
+layer_list.append(Dense(n_units=800, act=tl.ReLU, in_channels=800, name='Dense2'))
+layer_list.append(Dense(n_units=10, act=tl.ReLU, in_channels=800, name='Dense3'))
+MLP = SequentialLayer(layer_list)
+```
+### 5.2 继承基类Module构建
+
+针对较为复杂的网络,可以使用Module子类定义的方式来进行模型构建,在__init__对Layer进行声明,在forward里使用声明的Layer进行前向计算。这种方式中声明的Layer可以进行复用,针对相同的Layer构造一次,在forward可以调用多次。同样我们构建一个多层感知机模型。
+
+```python
+import tensorlayer as tl
+from tensorlayer.layers import Module, Dropout, Dense
+class MLP(Module):
+    def __init__(self):
+        super(MLP, self).__init__()
+        self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784)
+        self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800)
+        self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800)
+    def forward(self, x):
+        z = self.dense1(x)
+        z = self.dense2(z)
+        out = self.dense3(z)
+        return out
+```
+### 5.3 构建复杂网络结构
+
+在构建网络时,我们经常遇到一些模块重复使用多次,可以通过循环来构建。
+
+例如在网络中需要将感知机当成一个Block,并且使用三次,我们先定义要多次调用的Block
+
+```python
+import tensorlayer as tl
+from tensorlayer.layers import Module, Dense, Elementwise
+class Block(Module):
+    def __init__(self, in_channels):
+        super(Block, self).__init__()
+        self.dense1 = Dense(in_channels=in_channels, n_units=256)
+        self.dense2 = Dense(in_channels=256, n_units=384)
+        self.dense3 = Dense(in_channels=in_channels, n_units=384)
+        self.concat = Elementwise(combine_fn=tl.ops.add)
+
+    def forward(self, inputs):
+        z = self.dense1(inputs)
+        z1 = self.dense2(z)
+        z2 = self.dense3(inputs)
+        out = self.concat([z1, z2])
+        return out
+```
+定义好Block后我们通过SequentialLayer和Module构建网络
+```python
+class CNN(Module):
+    def __init__(self):
+        super(CNN, self).__init__()
+        self.flatten = Flatten(name='flatten')
+        self.dense1 = Dense(384, act=tl.ReLU, in_channels=2304)
+        self.dense_add = self.make_layer(in_channel=384)
+        self.dense2 = Dense(192, act=tl.ReLU, in_channels=384)
+        self.dense3 = Dense(10, act=None, in_channels=192)
+
+    def forward(self, x):
+        z = self.flatten(x)
+        z = self.dense1(z)
+        z = self.dense_add(z) 
+        z = self.dense2(z)
+        z = self.dense3(z)
+        return z
+
+    def make_layer(self, in_channel):
+        layers = []
+        _block = Block(in_channel)
+        layers.append(_block)
+        for _ in range(1, 3):
+            range_block = Block(in_channel)
+            layers.append(range_block)
+        return SequentialLayer(layers)
+```
+### 5.4 自动推断上一层输出大小
+
+我们构建网络时经常需要手动输入上一层的输出大小,作为下一层的输入,也就是每个Layer中的in_channels参数。在TensorLayer中也可以无需输入in_channels,构建网络后给定网络的输入调用一次参数初始化即可。
+
+```python
+import tensorlayer as tl
+from tensorlayer.layers import Module, Dense
+class CustomModel(Module):
+    def __init__(self):
+        super(CustomModel, self).__init__()
+        self.dense1 = Dense(n_units=800)
+        self.dense2 = Dense(n_units=800, act=tl.ReLU)
+        self.dense3 = Dense(n_units=10, act=tl.ReLU)
+
+    def forward(self, x):
+        z = self.dense1(x)
+        z = self.dense2(z)
+        out = self.dense3(z)
+        return out
+MLP = CustomModel()
+input = tl.layers.Input(shape=(1, 784))
+MLP.init_build(input)
+```
+## 六、模型训练
+
+TensorLayer提供了模型训练模块,可以直接调用进行训练。TensorLayer构建的模型也能支持在其他框架中直接使用,如用TensorLayer构建MLP模型,使用的是TensorFlow后端,那么可以使用TensorFlow的算子完成模型训练。
+
+### 6.1 调用模型训练模块训练
+
+调用封装好的models模块进行训练。
+
+```python
+import tensorlayer as tl
+optimizer = tl.optimizers.Momentum(0.05, 0.9)
+model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer)
+model.train(n_epoch=500, train_dataset=train_ds, print_freq=2)
+```
+### 6.2 混合对应框架算子进行训练
+
+混合TensorFlow进行训练。下面例子中optimizer和loss均可以使用TensorFlow的算子
+
+```python
+import tensorlayer as tl
+import tensorflow as tf
+optimizer = tl.optimizers.Momentum(0.05, 0.9)
+# optimizer = tf.optimizers.Momentum(0.05, 0.9)
+for epoch in range(n_epoch):
+    for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
+        MLP.set_train()
+        with tf.GradientTape() as tape:
+            _logits = MLP(X_batch)
+            _loss = tl.cost.softmax_cross_entropy_with_logits(_logits, y_batch)
+        grad = tape.gradient(_loss, train_weights)
+        optimizer.apply_gradients(zip(grad, train_weights))
+```
+
+## 七、完整实例
+ 
+同一套代码通过设置后端即可切换不同后端进行训练,无需修改代码。在os.environ['TL_BACKEND']中可以设置为'tensorflow', 'mindspore', 'paddle'。
+
+```python
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import os
+os.environ['TL_BACKEND'] = 'tensorflow'
+# os.environ['TL_BACKEND'] = 'mindspore'
+# os.environ['TL_BACKEND'] = 'paddle'
+import numpy as np
+import tensorlayer as tl
+from tensorlayer.layers import Module
+from tensorlayer.layers import Dense, Dropout
+X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
+class CustomModel(Module):
+    def __init__(self):
+        super(CustomModel, self).__init__()
+        self.dropout1 = Dropout(keep=0.8)
+        self.dense1 = Dense(n_units=800, act=tl.ReLU, in_channels=784)
+        self.dropout2 = Dropout(keep=0.8)
+        self.dense2 = Dense(n_units=800, act=tl.ReLU, in_channels=800)
+        self.dropout3 = Dropout(keep=0.8)
+        self.dense3 = Dense(n_units=10, act=tl.ReLU, in_channels=800)
+    def forward(self, x, foo=None):
+        z = self.dropout1(x)
+        z = self.dense1(z)
+        # z = self.bn(z)
+        z = self.dropout2(z)
+        z = self.dense2(z)
+        z = self.dropout3(z)
+        out = self.dense3(z)
+        if foo is not None:
+            out = tl.ops.relu(out)
+        return out
+def generator_train():
+    inputs = X_train
+    targets = y_train
+    if len(inputs) != len(targets):
+        raise AssertionError("The length of inputs and targets should be equal")
+    for _input, _target in zip(inputs, targets):
+        yield (_input, np.array(_target))
+MLP = CustomModel()
+n_epoch = 50
+batch_size = 128
+print_freq = 2
+shuffle_buffer_size = 128
+train_weights = MLP.trainable_weights
+optimizer = tl.optimizers.Momentum(0.05, 0.9)
+train_ds = tl.dataflow.FromGenerator(
+    generator_train, output_types=(tl.float32, tl.int32) , column_names=['data', 'label']
+)
+train_ds = tl.dataflow.Shuffle(train_ds,shuffle_buffer_size)
+train_ds = tl.dataflow.Batch(train_ds,batch_size)
+model = tl.models.Model(network=MLP, loss_fn=tl.cost.softmax_cross_entropy_with_logits, optimizer=optimizer)
+model.train(n_epoch=n_epoch, train_dataset=train_ds, 
print_freq=print_freq, print_train_batch=False)
+model.save_weights('./model.npz', format='npz_dict')
+model.load_weights('./model.npz', format='npz_dict')
+```
+## 八、预训练模型
+
+在TensorLayer中我们将持续提供丰富的预训练模型和应用。例如VGG16, VGG19, ResNet50, YOLOv4。下面例子展示了在MS-COCO数据集中利用YOLOv4进行目标检测,对应预训练模型和数据可以从examples/model_zoo中找到。
+
+```python
+import numpy as np
+import cv2
+from PIL import Image
+from examples.model_zoo.common import yolo4_input_processing, yolo4_output_processing, \
+    result_to_json, read_class_names, draw_boxes_and_labels_to_image_with_json
+from examples.model_zoo.yolo import YOLOv4
+import tensorlayer as tl
+tl.logging.set_verbosity(tl.logging.DEBUG)
+INPUT_SIZE = 416
+image_path = './data/kite.jpg'
+class_names = read_class_names('./model/coco.names')
+original_image = cv2.imread(image_path)
+image = cv2.cvtColor(np.array(original_image), cv2.COLOR_BGR2RGB)
+model = YOLOv4(NUM_CLASS=80, pretrained=True)
+model.set_eval()
+batch_data = yolo4_input_processing(original_image)
+feature_maps = model(batch_data)
+pred_bbox = yolo4_output_processing(feature_maps)
+json_result = result_to_json(image, pred_bbox)
+image = draw_boxes_and_labels_to_image_with_json(image, json_result, class_names)
+image = Image.fromarray(image.astype(np.uint8))
+image.show()
+```
+## 九、自定义Layer
+
+在TensorLayer中自定义Layer需要继承Module,在build中我们对训练参数进行定义,在forward中我们定义前向计算。下面给出用TensorFlow后端时,定义全连接层$$a=f(x*W + b)$$如果你想定义其他后端的Dense需要将算子换成对应后端。
+
+如果要定义一个通用的Layer则要把算子接口进行统一后封装在backend中,具体可以参考tensorlayer/layers中的Layer。
+
+```python
+from tensorlayer.layers import Module
+class Dense(Module):
+    def __init__(
+        self,
+        n_units,
+        act=None,
+        name=None,
+        in_channels = None
+    ):
+        super(Dense, self).__init__(name, act=act)
+        self.n_units = n_units
+        self.in_channels = in_channels
+        self.build()
+        self._built = True
+    def build(self): # initialize the model weights here
+        shape = [self.in_channels, self.n_units]
+        self.W = self._get_weights("weights", shape=tuple(shape), init=self.W_init)
+        self.b = 
self._get_weights("biases", shape=(self.n_units, ), init=self.b_init) + def forward(self, inputs): # call function + z = tf.matmul(inputs, self.W) + self.b + if self.act: # is not None + z = self.act(z) + return z +``` + diff --git a/tests/layers/test_layers_pooling.py b/tests/layers/test_layers_pooling.py index 65643fc..de61d52 100644 --- a/tests/layers/test_layers_pooling.py +++ b/tests/layers/test_layers_pooling.py @@ -33,15 +33,15 @@ class Layer_Pooling_Test(CustomTestCase): n19 = tl.layers.AdaptiveMeanPool1d(output_size=44, name='test_adaptivemeanpool1d')(n1) n20 = tl.layers.AdaptiveMaxPool1d(output_size=44, name='test_adaptivemaxpool1d')(n1) - cls.n1_shape = n1.get_shape().as_list() - cls.n2_shape = n2.get_shape().as_list() - cls.n3_shape = n3.get_shape().as_list() - cls.n4_shape = n4.get_shape().as_list() - cls.n5_shape = n5.get_shape().as_list() - cls.n16_shape = n16.get_shape().as_list() - cls.n17_shape = n17.get_shape().as_list() - cls.n19_shape = n19.get_shape().as_list() - cls.n20_shape = n20.get_shape().as_list() + cls.n1_shape = tl.get_tensor_shape(n1) + cls.n2_shape = tl.get_tensor_shape(n2) + cls.n3_shape = tl.get_tensor_shape(n3) + cls.n4_shape = tl.get_tensor_shape(n4) + cls.n5_shape = tl.get_tensor_shape(n5) + cls.n16_shape = tl.get_tensor_shape(n16) + cls.n17_shape = tl.get_tensor_shape(n17) + cls.n19_shape = tl.get_tensor_shape(n19) + cls.n20_shape = tl.get_tensor_shape(n20) ## 2D ======================================================================== @@ -58,15 +58,14 @@ class Layer_Pooling_Test(CustomTestCase): n21 = tl.layers.AdaptiveMeanPool2d(output_size=(45, 32), name='test_adaptivemeanpool2d')(n6) n22 = tl.layers.AdaptiveMaxPool2d(output_size=(45, 32), name='test_adaptivemaxpool2d')(n6) - cls.n6_shape = n6.get_shape().as_list() - cls.n7_shape = n7.get_shape().as_list() - cls.n8_shape = n8.get_shape().as_list() - cls.n9_shape = n9.get_shape().as_list() - cls.n10_shape = n10.get_shape().as_list() - cls.n15_shape = n15.get_shape().as_list() 
- # cls.n18_shape = n18.get_shape().as_list() - cls.n21_shape = n21.get_shape().as_list() - cls.n22_shape = n22.get_shape().as_list() + cls.n6_shape = tl.get_tensor_shape(n6) + cls.n7_shape = tl.get_tensor_shape(n7) + cls.n8_shape = tl.get_tensor_shape(n8) + cls.n9_shape = tl.get_tensor_shape(n9) + cls.n10_shape = tl.get_tensor_shape(n10) + cls.n15_shape = tl.get_tensor_shape(n15) + cls.n21_shape = tl.get_tensor_shape(n21) + cls.n22_shape = tl.get_tensor_shape(n22) ## 3D ========================================================================