You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

PyodVAE.py 14 kB

first commit Former-commit-id: 08bc23ba02cffbce3cf63962390a65459a132e48 [formerly 0795edd4834b9b7dc66db8d10d4cbaf42bbf82cb] [formerly b5010b42541add7e2ea2578bf2da537efc457757 [formerly a7ca09c2c34c4fc8b3d8e01fcfa08eeeb2cae99d]] [formerly 615058473a2177ca5b89e9edbb797f4c2a59c7e5 [formerly 743d8dfc6843c4c205051a8ab309fbb2116c895e] [formerly bb0ea98b1e14154ef464e2f7a16738705894e54b [formerly 960a69da74b81ef8093820e003f2d6c59a34974c]]] [formerly 2fa3be52c1b44665bc81a7cc7d4cea4bbf0d91d5 [formerly 2054589f0898627e0a17132fd9d4cc78efc91867] [formerly 3b53730e8a895e803dfdd6ca72bc05e17a4164c1 [formerly 8a2fa8ab7baf6686d21af1f322df46fd58c60e69]] [formerly 87d1e3a07a19d03c7d7c94d93ab4fa9f58dada7c [formerly f331916385a5afac1234854ee8d7f160f34b668f] [formerly 69fb3c78a483343f5071da4f7e2891b83a49dd18 [formerly 386086f05aa9487f65bce2ee54438acbdce57650]]]] Former-commit-id: a00aed8c934a6460c4d9ac902b9a74a3d6864697 [formerly 26fdeca29c2f07916d837883983ca2982056c78e] [formerly 0e3170d41a2f99ecf5c918183d361d4399d793bf [formerly 3c12ad4c88ac5192e0f5606ac0d88dd5bf8602dc]] [formerly d5894f84f2fd2e77a6913efdc5ae388cf1be0495 [formerly ad3e7bc670ff92c992730d29c9d3aa1598d844e8] [formerly 69fb3c78a483343f5071da4f7e2891b83a49dd18]] Former-commit-id: 3c19c9fae64f6106415fbc948a4dc613b9ee12f8 [formerly 467ddc0549c74bb007e8f01773bb6dc9103b417d] [formerly 5fa518345d958e2760e443b366883295de6d991c [formerly 3530e130b9fdb7280f638dbc2e785d2165ba82aa]] Former-commit-id: 9f5d473d42a435ec0d60149939d09be1acc25d92 [formerly be0b25c4ec2cde052a041baf0e11f774a158105d] Former-commit-id: 9eca71cb73ba9edccd70ac06a3b636b8d4093b04
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389
  1. from typing import Any, Callable, List, Dict, Union, Optional, Sequence, Tuple
  2. from numpy import ndarray
  3. from collections import OrderedDict
  4. from scipy import sparse
  5. import os
  6. import sklearn
  7. import numpy
  8. import typing
  9. import tensorflow
  10. from tensorflow.keras.losses import mean_squared_error
  11. from tensorflow import keras
  12. from tensorflow.keras import losses,layers
  13. # Custom import commands if any
  14. import warnings
  15. import numpy as np
  16. from sklearn.utils import check_array
  17. from sklearn.exceptions import NotFittedError
  18. # from numba import njit
  19. from pyod.utils.utility import argmaxn
  20. from d3m.container.numpy import ndarray as d3m_ndarray
  21. from d3m.container import DataFrame as d3m_dataframe
  22. from d3m.metadata import hyperparams, params, base as metadata_base
  23. from d3m import utils
  24. from d3m.base import utils as base_utils
  25. from d3m.exceptions import PrimitiveNotFittedError
  26. from d3m.primitive_interfaces.base import CallResult, DockerContainer
  27. # from d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase
  28. from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
  29. from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
  30. from d3m.primitive_interfaces.base import ProbabilisticCompositionalityMixin, ContinueFitMixin
  31. from d3m import exceptions
  32. import pandas
  33. from d3m import container, utils as d3m_utils
  34. from detection_algorithm.UODBasePrimitive import Params_ODBase, Hyperparams_ODBase, UnsupervisedOutlierDetectorBase
  35. from pyod.models.vae import VAE
  36. import uuid
  37. # from typing import Union
# I/O type aliases for this primitive: both input and output are d3m container DataFrames.
Inputs = d3m_dataframe
Outputs = d3m_dataframe
class Params(Params_ODBase):
    """Serializable state of the VariationalAutoEncoder primitive.

    No attributes are added beyond those inherited from ``Params_ODBase``.
    """
    ######## Add more Attributes #######
    pass
  43. class Hyperparams(Hyperparams_ODBase):
  44. ######## Add more Hyperparamters #######
  45. encoder_neurons = hyperparams.List(
  46. default=[4, 2, 4],
  47. elements=hyperparams.Hyperparameter[int](1),
  48. description='The number of neurons per hidden layers in encoder.',
  49. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  50. )
  51. decoder_neurons = hyperparams.List(
  52. default=[4, 4, 4],
  53. elements=hyperparams.Hyperparameter[int](1),
  54. description='The number of neurons per hidden layers in decoder.',
  55. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  56. )
  57. hidden_activation = hyperparams.Enumeration[str](
  58. values=['relu', 'sigmoid', 'softmax', 'softplus', 'softsign',
  59. 'tanh', 'selu', 'elu', 'exponential'],
  60. default='relu',
  61. description='Activation function to use for hidden layers.',
  62. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  63. )
  64. output_activation = hyperparams.Enumeration[str](
  65. values=['relu', 'sigmoid', 'softmax', 'softplus', 'softsign',
  66. 'tanh', 'selu', 'elu', 'exponential'],
  67. default='sigmoid',
  68. description='Activation function to use for output layer.',
  69. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  70. )
  71. loss = hyperparams.Enumeration[str](
  72. values=['mean_squared_error'],
  73. default='mean_squared_error',
  74. description='Loss function.',
  75. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  76. )
  77. gamma = hyperparams.Hyperparameter[float](
  78. default=1.0,
  79. description='Coefficient of beta VAE regime. Default is regular VAE.',
  80. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  81. )
  82. capacity = hyperparams.Hyperparameter[float](
  83. default=0.0,
  84. description='Maximum capacity of a loss bottle neck.',
  85. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  86. )
  87. optimizer = hyperparams.Enumeration[str](
  88. values=['SGD', 'RMSprop', 'adam', 'Adadelta', 'Adagrad',
  89. 'Adamax', 'Nadam', 'Ftrl'],
  90. default='adam',
  91. description='String (name of optimizer) or optimizer instance.',
  92. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  93. )
  94. epochs = hyperparams.Hyperparameter[int](
  95. default=100,
  96. description='Number of epochs to train the model.',
  97. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  98. )
  99. batch_size = hyperparams.Hyperparameter[int](
  100. default=32,
  101. description='Number of samples per gradient update.',
  102. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  103. )
  104. dropout_rate = hyperparams.Uniform(
  105. lower=0.,
  106. upper=1.,
  107. default=0.2,
  108. description='The dropout to be used across all layers.',
  109. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  110. )
  111. l2_regularizer = hyperparams.Uniform(
  112. lower=0.,
  113. upper=1.,
  114. default=0.1,
  115. description='The regularization strength of activity_regularizer applied on each layer.',
  116. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  117. )
  118. validation_size = hyperparams.Uniform(
  119. lower=0.,
  120. upper=1.,
  121. default=0.1,
  122. description='The percentage of data to be used for validation.',
  123. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  124. )
  125. preprocessing = hyperparams.UniformBool(
  126. default=True,
  127. description='If True, apply standardization on the data.',
  128. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  129. )
  130. verbosity = hyperparams.Enumeration[int](
  131. values=[0, 1, 2],
  132. default=1,
  133. description='Verbosity mode.',
  134. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  135. )
  136. random_state = hyperparams.Union[Union[int, None]](
  137. configuration=OrderedDict(
  138. init=hyperparams.Hyperparameter[int](
  139. default=0,
  140. ),
  141. ninit=hyperparams.Hyperparameter[None](
  142. default=None,
  143. ),
  144. ),
  145. default='ninit',
  146. description='the seed used by the random number generator.',
  147. semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
  148. )
  149. contamination = hyperparams.Uniform(
  150. lower=0.,
  151. upper=0.5,
  152. default=0.01,
  153. description='The amount of contamination of the data set, i.e. the proportion of outliers in the data set. ',
  154. semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
  155. )
  156. pass
  157. class VariationalAutoEncoder(UnsupervisedOutlierDetectorBase[Inputs, Outputs, Params, Hyperparams]):
  158. """
  159. Auto Encoder (AE) is a type of neural networks for learning useful data
  160. representations unsupervisedly. Similar to PCA, AE could be used to
  161. detect outlying objects in the data by calculating the reconstruction
  162. errors. See :cite:`aggarwal2015outlier` Chapter 3 for details.
  163. Parameters
  164. ----------
  165. hidden_neurons : list, optional (default=[4, 2, 4])
  166. The number of neurons per hidden layers.
  167. hidden_activation : str, optional (default='relu')
  168. Activation function to use for hidden layers.
  169. All hidden layers are forced to use the same type of activation.
  170. See https://keras.io/activations/
  171. output_activation : str, optional (default='sigmoid')
  172. Activation function to use for output layer.
  173. See https://keras.io/activations/
  174. loss : str or obj, optional (default=keras.losses.mean_squared_error)
  175. String (name of objective function) or objective function.
  176. See https://keras.io/losses/
  177. optimizer : str, optional (default='adam')
  178. String (name of optimizer) or optimizer instance.
  179. See https://keras.io/optimizers/
  180. epochs : int, optional (default=100)
  181. Number of epochs to train the model.
  182. batch_size : int, optional (default=32)
  183. Number of samples per gradient update.
  184. dropout_rate : float in (0., 1), optional (default=0.2)
  185. The dropout to be used across all layers.
  186. l2_regularizer : float in (0., 1), optional (default=0.1)
  187. The regularization strength of activity_regularizer
  188. applied on each layer. By default, l2 regularizer is used. See
  189. https://keras.io/regularizers/
  190. validation_size : float in (0., 1), optional (default=0.1)
  191. The percentage of data to be used for validation.
  192. preprocessing : bool, optional (default=True)
  193. If True, apply standardization on the data.
  194. verbose : int, optional (default=1)
  195. Verbosity mode.
  196. - 0 = silent
  197. - 1 = progress bar
  198. - 2 = one line per epoch.
  199. For verbosity >= 1, model summary may be printed.
  200. random_state : random_state: int, RandomState instance or None, optional
  201. (default=None)
  202. If int, random_state is the seed used by the random
  203. number generator; If RandomState instance, random_state is the random
  204. number generator; If None, the random number generator is the
  205. RandomState instance used by `np.random`.
  206. contamination : float in (0., 0.5), optional (default=0.1)
  207. The amount of contamination of the data set, i.e.
  208. the proportion of outliers in the data set. When fitting this is used
  209. to define the threshold on the decision function.
  210. Attributes
  211. ----------
  212. encoding_dim_ : int
  213. The number of neurons in the encoding layer.
  214. compression_rate_ : float
  215. The ratio between the original feature and
  216. the number of neurons in the encoding layer.
  217. model_ : Keras Object
  218. The underlying AutoEncoder in Keras.
  219. history_: Keras Object
  220. The AutoEncoder training history.
  221. decision_scores_ : numpy array of shape (n_samples,)
  222. The outlier scores of the training data.
  223. The higher, the more abnormal. Outliers tend to have higher
  224. scores. This value is available once the detector is
  225. fitted.
  226. threshold_ : float
  227. The threshold is based on ``contamination``. It is the
  228. ``n_samples * contamination`` most abnormal samples in
  229. ``decision_scores_``. The threshold is calculated for generating
  230. binary outlier labels.
  231. labels_ : int, either 0 or 1
  232. The binary labels of the training data. 0 stands for inliers
  233. and 1 for outliers/anomalies. It is generated by applying
  234. ``threshold_`` on ``decision_scores_``.
  235. """
  236. metadata = metadata_base.PrimitiveMetadata({
  237. "name": "TODS.anomaly_detection_primitives.VariationalAutoEncoder",
  238. "python_path": "d3m.primitives.tods.detection_algorithm.pyod_vae",
  239. "source": {'name': "DATA Lab at Texas A&M University", 'contact': 'mailto:khlai037@tamu.edu','uris': ['https://gitlab.com/lhenry15/tods.git']},
  240. "algorithm_types": [metadata_base.PrimitiveAlgorithmType.VARIATIONAL_AUTO_ENCODER, ],
  241. "primitive_family": metadata_base.PrimitiveFamily.ANOMALY_DETECTION,
  242. "version": "0.0.1",
  243. "hyperparameters_to_tune": [''],
  244. "id": str(uuid.uuid3(uuid.NAMESPACE_DNS, 'AutoEncoderPrimitive')),
  245. })
  246. def __init__(self, *,
  247. hyperparams: Hyperparams, #
  248. random_seed: int = 0,
  249. docker_containers: Dict[str, DockerContainer] = None) -> None:
  250. super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)
  251. if hyperparams['loss'] == 'mean_squared_error':
  252. loss = keras.losses.mean_squared_error
  253. else:
  254. raise ValueError('VAE only suports mean squered error for now')
  255. self._clf = VAE(contamination=hyperparams['contamination'],
  256. encoder_neurons=hyperparams['encoder_neurons'],
  257. decoder_neurons=hyperparams['decoder_neurons'],
  258. hidden_activation=hyperparams['hidden_activation'],
  259. output_activation=hyperparams['output_activation'],
  260. loss=loss,
  261. gamma=hyperparams['gamma'],
  262. capacity=hyperparams['capacity'],
  263. optimizer=hyperparams['optimizer'],
  264. epochs=hyperparams['epochs'],
  265. batch_size=hyperparams['batch_size'],
  266. dropout_rate=hyperparams['dropout_rate'],
  267. l2_regularizer=hyperparams['l2_regularizer'],
  268. validation_size=hyperparams['validation_size'],
  269. preprocessing=hyperparams['preprocessing'],
  270. verbosity=hyperparams['verbosity'],
  271. random_state=hyperparams['random_state'],
  272. )
  273. return
  274. def set_training_data(self, *, inputs: Inputs) -> None:
  275. """
  276. Set training data for outlier detection.
  277. Args:
  278. inputs: Container DataFrame
  279. Returns:
  280. None
  281. """
  282. super().set_training_data(inputs=inputs)
  283. def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
  284. """
  285. Fit model with training data.
  286. Args:
  287. *: Container DataFrame. Time series data up to fit.
  288. Returns:
  289. None
  290. """
  291. return super().fit()
  292. def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
  293. """
  294. Process the testing data.
  295. Args:
  296. inputs: Container DataFrame. Time series data up to outlier detection.
  297. Returns:
  298. Container DataFrame
  299. 1 marks Outliers, 0 marks normal.
  300. """
  301. return super().produce(inputs=inputs, timeout=timeout, iterations=iterations)
  302. def get_params(self) -> Params:
  303. """
  304. Return parameters.
  305. Args:
  306. None
  307. Returns:
  308. class Params
  309. """
  310. return super().get_params()
  311. def set_params(self, *, params: Params) -> None:
  312. """
  313. Set parameters for outlier detection.
  314. Args:
  315. params: class Params
  316. Returns:
  317. None
  318. """
  319. super().set_params(params=params)

全栈的自动化机器学习系统,主要针对多变量时间序列数据的异常检测。TODS提供了详尽的用于构建基于机器学习的异常检测系统的模块,它们包括:数据处理(data processing)、时间序列处理(time series processing)、特征分析(feature analysis)、检测算法(detection algorithms)和强化模块(reinforcement module)。这些模块所提供的功能包括常见的数据预处理、时间序列数据的平滑或变换,从时域或频域中抽取特征、多种多样的检测算