You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

PCA.py 11 kB

first commit Former-commit-id: 08bc23ba02cffbce3cf63962390a65459a132e48 [formerly 0795edd4834b9b7dc66db8d10d4cbaf42bbf82cb] [formerly b5010b42541add7e2ea2578bf2da537efc457757 [formerly a7ca09c2c34c4fc8b3d8e01fcfa08eeeb2cae99d]] [formerly 615058473a2177ca5b89e9edbb797f4c2a59c7e5 [formerly 743d8dfc6843c4c205051a8ab309fbb2116c895e] [formerly bb0ea98b1e14154ef464e2f7a16738705894e54b [formerly 960a69da74b81ef8093820e003f2d6c59a34974c]]] [formerly 2fa3be52c1b44665bc81a7cc7d4cea4bbf0d91d5 [formerly 2054589f0898627e0a17132fd9d4cc78efc91867] [formerly 3b53730e8a895e803dfdd6ca72bc05e17a4164c1 [formerly 8a2fa8ab7baf6686d21af1f322df46fd58c60e69]] [formerly 87d1e3a07a19d03c7d7c94d93ab4fa9f58dada7c [formerly f331916385a5afac1234854ee8d7f160f34b668f] [formerly 69fb3c78a483343f5071da4f7e2891b83a49dd18 [formerly 386086f05aa9487f65bce2ee54438acbdce57650]]]] Former-commit-id: a00aed8c934a6460c4d9ac902b9a74a3d6864697 [formerly 26fdeca29c2f07916d837883983ca2982056c78e] [formerly 0e3170d41a2f99ecf5c918183d361d4399d793bf [formerly 3c12ad4c88ac5192e0f5606ac0d88dd5bf8602dc]] [formerly d5894f84f2fd2e77a6913efdc5ae388cf1be0495 [formerly ad3e7bc670ff92c992730d29c9d3aa1598d844e8] [formerly 69fb3c78a483343f5071da4f7e2891b83a49dd18]] Former-commit-id: 3c19c9fae64f6106415fbc948a4dc613b9ee12f8 [formerly 467ddc0549c74bb007e8f01773bb6dc9103b417d] [formerly 5fa518345d958e2760e443b366883295de6d991c [formerly 3530e130b9fdb7280f638dbc2e785d2165ba82aa]] Former-commit-id: 9f5d473d42a435ec0d60149939d09be1acc25d92 [formerly be0b25c4ec2cde052a041baf0e11f774a158105d] Former-commit-id: 9eca71cb73ba9edccd70ac06a3b636b8d4093b04
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264
  1. # -*- coding: utf-8 -*-
  2. """Autoregressive model for multivariate time series outlier detection.
  3. """
  4. import numpy as np
  5. from sklearn.utils import check_array
  6. from sklearn.utils.validation import check_is_fitted
  7. from detection_algorithm.core.CollectiveBase import CollectiveBaseDetector
  8. from pyod.models.pca import PCA as PCA_PYOD
  9. from detection_algorithm.core.utility import get_sub_matrices
  10. class PCA(CollectiveBaseDetector):
  11. """PCA-based outlier detection with both univariate and multivariate
  12. time series data. TS data will be first transformed to tabular format.
  13. For univariate data, it will be in shape of [valid_length, window_size].
  14. for multivariate data with d sequences, it will be in the shape of
  15. [valid_length, window_size].
  16. Parameters
  17. ----------
  18. window_size : int
  19. The moving window size.
  20. step_size : int, optional (default=1)
  21. The displacement for moving window.
  22. contamination : float in (0., 0.5), optional (default=0.1)
  23. The amount of contamination of the data set,
  24. i.e. the proportion of outliers in the data set. Used when fitting to
  25. define the threshold on the decision function.
  26. n_components : int, float, None or string
  27. Number of components to keep. It should be smaller than the window_size.
  28. if n_components is not set all components are kept::
  29. n_components == min(n_samples, n_features)
  30. if n_components == 'mle' and svd_solver == 'full', Minka\'s MLE is used
  31. to guess the dimension
  32. if ``0 < n_components < 1`` and svd_solver == 'full', select the number
  33. of components such that the amount of variance that needs to be
  34. explained is greater than the percentage specified by n_components
  35. n_components cannot be equal to n_features for svd_solver == 'arpack'.
  36. n_selected_components : int, optional (default=None)
  37. Number of selected principal components
  38. for calculating the outlier scores. It is not necessarily equal to
  39. the total number of the principal components. If not set, use
  40. all principal components.
  41. copy : bool (default True)
  42. If False, data passed to fit are overwritten and running
  43. fit(X).transform(X) will not yield the expected results,
  44. use fit_transform(X) instead.
  45. whiten : bool, optional (default False)
  46. When True (False by default) the `components_` vectors are multiplied
  47. by the square root of n_samples and then divided by the singular values
  48. to ensure uncorrelated outputs with unit component-wise variances.
  49. Whitening will remove some information from the transformed signal
  50. (the relative variance scales of the components) but can sometime
  51. improve the predictive accuracy of the downstream estimators by
  52. making their data respect some hard-wired assumptions.
  53. svd_solver : string {'auto', 'full', 'arpack', 'randomized'}
  54. auto :
  55. the solver is selected by a default policy based on `X.shape` and
  56. `n_components`: if the input data is larger than 500x500 and the
  57. number of components to extract is lower than 80% of the smallest
  58. dimension of the data, then the more efficient 'randomized'
  59. method is enabled. Otherwise the exact full SVD is computed and
  60. optionally truncated afterwards.
  61. full :
  62. run exact full SVD calling the standard LAPACK solver via
  63. `scipy.linalg.svd` and select the components by postprocessing
  64. arpack :
  65. run SVD truncated to n_components calling ARPACK solver via
  66. `scipy.sparse.linalg.svds`. It requires strictly
  67. 0 < n_components < X.shape[1]
  68. randomized :
  69. run randomized SVD by the method of Halko et al.
  70. tol : float >= 0, optional (default .0)
  71. Tolerance for singular values computed by svd_solver == 'arpack'.
  72. iterated_power : int >= 0, or 'auto', (default 'auto')
  73. Number of iterations for the power method computed by
  74. svd_solver == 'randomized'.
  75. random_state : int, RandomState instance or None, optional (default None)
  76. If int, random_state is the seed used by the random number generator;
  77. If RandomState instance, random_state is the random number generator;
  78. If None, the random number generator is the RandomState instance used
  79. by `np.random`. Used when ``svd_solver`` == 'arpack' or 'randomized'.
  80. weighted : bool, optional (default=True)
  81. If True, the eigenvalues are used in score computation.
  82. The eigenvectors with small eigenvalues comes with more importance
  83. in outlier score calculation.
  84. standardization : bool, optional (default=True)
  85. If True, perform standardization first to convert
  86. data to zero mean and unit variance.
  87. See http://scikit-learn.org/stable/auto_examples/preprocessing/plot_scaling_importance.html
  88. Attributes
  89. ----------
  90. decision_scores_ : numpy array of shape (n_samples,)
  91. The outlier scores of the training data.
  92. The higher, the more abnormal. Outliers tend to have higher
  93. scores. This value is available once the detector is
  94. fitted.
  95. threshold_ : float
  96. The threshold is based on ``contamination``. It is the
  97. ``n_samples * contamination`` most abnormal samples in
  98. ``decision_scores_``. The threshold is calculated for generating
  99. binary outlier labels.
  100. labels_ : int, either 0 or 1
  101. The binary labels of the training data. 0 stands for inliers
  102. and 1 for outliers/anomalies. It is generated by applying
  103. ``threshold_`` on ``decision_scores_``.
  104. """
  105. def __init__(self, window_size, step_size=1, contamination=0.1,
  106. n_components=None, n_selected_components=None,
  107. copy=True, whiten=False, svd_solver='auto',
  108. tol=0.0, iterated_power='auto', random_state=None,
  109. weighted=True, standardization=True):
  110. super(PCA, self).__init__(contamination=contamination)
  111. self.window_size = window_size
  112. self.step_size = step_size
  113. # parameters for PCA
  114. self.n_components = n_components
  115. self.n_selected_components = n_selected_components
  116. self.copy = copy
  117. self.whiten = whiten
  118. self.svd_solver = svd_solver
  119. self.tol = tol
  120. self.iterated_power = iterated_power
  121. self.random_state = random_state
  122. self.weighted = weighted
  123. self.standardization = standardization
  124. # initialize a kNN model
  125. self.model_ = PCA_PYOD(n_components=self.n_components,
  126. n_selected_components=self.n_selected_components,
  127. contamination=self.contamination,
  128. copy=self.copy,
  129. whiten=self.whiten,
  130. svd_solver=self.svd_solver,
  131. tol=self.tol,
  132. iterated_power=self.iterated_power,
  133. random_state=self.random_state,
  134. weighted=self.weighted,
  135. standardization=self.standardization)
  136. def fit(self, X: np.array) -> object:
  137. """Fit detector. y is ignored in unsupervised methods.
  138. Parameters
  139. ----------
  140. X : numpy array of shape (n_samples, n_features)
  141. The input samples.
  142. y : Ignored
  143. Not used, present for API consistency by convention.
  144. Returns
  145. -------
  146. self : object
  147. Fitted estimator.
  148. """
  149. X = check_array(X).astype(np.float)
  150. # first convert it into submatrices, and flatten it
  151. sub_matrices, self.left_inds_, self.right_inds_ = get_sub_matrices(
  152. X,
  153. self.window_size,
  154. self.step_size,
  155. return_numpy=True,
  156. flatten=True,
  157. flatten_order='F')
  158. # if self.n_components > sub_matrices.shape[1]:
  159. # raise ValueError('n_components exceeds window_size times the number of sequences.')
  160. # fit the PCA model
  161. self.model_.fit(sub_matrices)
  162. self.decision_scores_ = self.model_.decision_scores_
  163. self._process_decision_scores()
  164. return self
  165. def decision_function(self, X: np.array):
  166. """Predict raw anomaly scores of X using the fitted detector.
  167. The anomaly score of an input sample is computed based on the fitted
  168. detector. For consistency, outliers are assigned with
  169. higher anomaly scores.
  170. Parameters
  171. ----------
  172. X : numpy array of shape (n_samples, n_features)
  173. The input samples. Sparse matrices are accepted only
  174. if they are supported by the base estimator.
  175. Returns
  176. -------
  177. anomaly_scores : numpy array of shape (n_samples,)
  178. The anomaly score of the input samples.
  179. """
  180. check_is_fitted(self, ['model_'])
  181. X = check_array(X).astype(np.float)
  182. # first convert it into submatrices, and flatten it
  183. sub_matrices, X_left_inds, X_right_inds = get_sub_matrices(
  184. X,
  185. self.window_size,
  186. self.step_size,
  187. return_numpy=True,
  188. flatten=True,
  189. flatten_order='F')
  190. # return the prediction result by PCA
  191. return self.model_.decision_function(
  192. sub_matrices), X_left_inds.ravel(), X_right_inds.ravel()
  193. if __name__ == "__main__":
  194. # X_train = np.asarray(
  195. # [3., 4., 8., 16, 18, 13., 22., 36., 59., 128, 62, 67, 78, 100]).reshape(-1, 1)
  196. # X_test = np.asarray(
  197. # [3., 4., 8.6, 13.4, 22.5, 17, 19.2, 36.1, 127, -23, 59.2]).reshape(-1,
  198. # 1)
  199. X_train = np.asarray(
  200. [[3., 5], [5., 9], [7., 2], [42., 20], [8., 12], [10., 12],
  201. [12., 12],
  202. [18., 16], [20., 7], [18., 10], [23., 12], [22., 15]])
  203. w = get_sub_matrices(X_train, window_size=3, step=2, flatten=False)
  204. X_test = np.asarray(
  205. [[12., 10], [8., 12], [80., 80], [92., 983],
  206. [18., 16], [20., 7], [18., 10], [3., 5], [5., 9], [23., 12],
  207. [22., 15]])
  208. clf = PCA(window_size=3, step_size=2, contamination=0.2)
  209. clf.fit(X_train)
  210. decision_scores, left_inds_, right_inds = clf.decision_scores_, \
  211. clf.left_inds_, clf.right_inds_
  212. print(clf.left_inds_, clf.right_inds_)
  213. pred_scores, X_left_inds, X_right_inds = clf.decision_function(X_test)
  214. pred_labels, X_left_inds, X_right_inds = clf.predict(X_test)
  215. pred_probs, X_left_inds, X_right_inds = clf.predict_proba(X_test)
  216. print(pred_scores)
  217. print(pred_labels)
  218. print(pred_probs)

全栈的自动化机器学习系统,主要针对多变量时间序列数据的异常检测。TODS提供了详尽的用于构建基于机器学习的异常检测系统的模块,它们包括:数据处理(data processing),时间序列处理( time series processing),特征分析(feature analysis),检测算法(detection algorithms),和强化模块( reinforcement module)。这些模块所提供的功能包括常见的数据预处理、时间序列数据的平滑或变换,从时域或频域中抽取特征、多种多样的检测算