
Telemanom.py 15 kB

first commit
5 years ago
from typing import Any, Callable, List, Dict, Union, Optional, Sequence, Tuple
from collections import OrderedDict
import os
import typing

import numpy as np
from numpy import ndarray
import pandas as pd
import sklearn
from scipy import sparse
from sklearn.utils import check_array

from keras.models import Sequential, load_model
from keras.callbacks import History, EarlyStopping, Callback
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Activation, Dropout
from keras.layers import Flatten

from d3m import container, exceptions, utils
from d3m.base import utils as base_utils
from d3m.container.numpy import ndarray as d3m_ndarray
from d3m.container import DataFrame as d3m_dataframe
from d3m.metadata import hyperparams, params, base as metadata_base
from d3m.exceptions import PrimitiveNotFittedError
from d3m.primitive_interfaces.base import CallResult, DockerContainer
from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase

from detection_algorithm.UODBasePrimitive import Params_ODBase, Hyperparams_ODBase, UnsupervisedOutlierDetectorBase
from detection_algorithm.core.CollectiveBase import CollectiveBaseDetector
from detection_algorithm.core.utils.errors import Errors
from detection_algorithm.core.utils.channel import Channel
from detection_algorithm.core.utils.modeling import Model

__all__ = ('Telemanom',)

Inputs = container.DataFrame
Outputs = container.DataFrame
class Params(Params_ODBase):
    ######## Add more Attributes #######
    pass


class Hyperparams(Hyperparams_ODBase):
    smoothing_perc = hyperparams.Hyperparameter[float](
        default=0.05,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
        description="Determines the window size used in EWMA smoothing (as a percentage of the total values for the channel)."
    )
    window_size_ = hyperparams.Hyperparameter[int](
        default=100,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Number of trailing batches to use in the error calculation."
    )
    error_buffer = hyperparams.Hyperparameter[int](
        default=50,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Number of values surrounding an error that are brought into the sequence (promotes grouping of nearby sequences)."
    )
    batch_size = hyperparams.Hyperparameter[int](
        default=70,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Batch size while predicting."
    )

    # LSTM model parameters
    dropout = hyperparams.Hyperparameter[float](
        default=0.3,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
        description="Dropout rate."
    )
    validation_split = hyperparams.Hyperparameter[float](
        default=0.2,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Validation split."
    )
    optimizer = hyperparams.Hyperparameter[typing.Union[str, None]](
        default='Adam',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Optimizer."
    )
    lstm_batch_size = hyperparams.Hyperparameter[int](
        default=64,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="LSTM model training batch size."
    )
    loss_metric = hyperparams.Hyperparameter[typing.Union[str, None]](
        default='mean_squared_error',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Loss function."
    )
    layers = hyperparams.List(
        elements=hyperparams.Hyperparameter[int](1),
        default=[10, 10],
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
        description="Number of units for the two LSTM layers."
    )

    # Training parameters
    epochs = hyperparams.Hyperparameter[int](
        default=1,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
        description="Number of training epochs."
    )
    patience = hyperparams.Hyperparameter[int](
        default=10,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Number of consecutive training iterations to allow without the val_loss decreasing by at least min_delta."
    )
    min_delta = hyperparams.Hyperparameter[float](
        default=0.0003,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
        description="Minimum decrease in val_loss required for a training iteration to count as an improvement (see patience)."
    )
    l_s = hyperparams.Hyperparameter[int](
        default=100,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
        description="Number of previous timesteps provided to the model to predict future values."
    )
    n_predictions = hyperparams.Hyperparameter[int](
        default=10,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Number of steps ahead to predict."
    )

    # Error thresholding parameters
    # ==================================
    p = hyperparams.Hyperparameter[float](
        default=0.05,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
        description="Minimum percent decrease between max errors in anomalous sequences (used for pruning)."
    )

    # Contamination
    contamination = hyperparams.Uniform(
        lower=0.,
        upper=0.5,
        default=0.1,
        description="The amount of contamination of the data set, i.e. the proportion of outliers in the data set. Used when fitting to define the threshold on the decision function.",
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
    )
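
# A minimal usage sketch, kept as a comment so importing the module stays
# side-effect free. It assumes the standard d3m Hyperparams API, where
# `defaults()` returns the default configuration and `replace()` produces a
# modified copy:
#
#     hp = Hyperparams.defaults().replace({'epochs': 5, 'l_s': 50, 'layers': [64, 64]})
#     primitive = TelemanomPrimitive(hyperparams=hp)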
class TelemanomPrimitive(UnsupervisedOutlierDetectorBase[Inputs, Outputs, Params, Hyperparams]):
    """
    A primitive that uses Telemanom for outlier detection.
    """
    __author__ = "Data Lab"
    metadata = metadata_base.PrimitiveMetadata(
        {
            '__author__': "DATA Lab at Texas A&M University",
            'name': "Telemanom",
            'python_path': 'd3m.primitives.tods.detection_algorithm.telemanom',
            'source': {
                'name': 'DATA Lab at Texas A&M University',
                'contact': 'mailto:khlai037@tamu.edu',
                'uris': [
                    'https://gitlab.com/lhenry15/tods.git',
                    'https://gitlab.com/lhenry15/tods/-/blob/purav/anomaly-primitives/anomaly_primitives/telemanom.py',
                ],
            },
            'algorithm_types': [
                metadata_base.PrimitiveAlgorithmType.TELEMANOM,
            ],
            'primitive_family': metadata_base.PrimitiveFamily.ANOMALY_DETECTION,
            'id': 'c7259da6-7ce6-42ad-83c6-15238679f5fa',
            'hyperparameters_to_tune': ['layers', 'loss_metric', 'optimizer', 'epochs', 'p', 'l_s', 'patience', 'min_delta', 'dropout', 'smoothing_perc'],
            'version': '0.0.1',
        },
    )

    def __init__(self, *,
                 hyperparams: Hyperparams,
                 random_seed: int = 0,
                 docker_containers: Dict[str, DockerContainer] = None) -> None:
        super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)
        self._clf = Detector(smoothing_perc=self.hyperparams['smoothing_perc'],
                             window_size=self.hyperparams['window_size_'],
                             error_buffer=self.hyperparams['error_buffer'],
                             batch_size=self.hyperparams['batch_size'],
                             dropout=self.hyperparams['dropout'],
                             validation_split=self.hyperparams['validation_split'],
                             optimizer=self.hyperparams['optimizer'],
                             lstm_batch_size=self.hyperparams['lstm_batch_size'],
                             loss_metric=self.hyperparams['loss_metric'],
                             layers=self.hyperparams['layers'],
                             epochs=self.hyperparams['epochs'],
                             patience=self.hyperparams['patience'],
                             min_delta=self.hyperparams['min_delta'],
                             l_s=self.hyperparams['l_s'],
                             n_predictions=self.hyperparams['n_predictions'],
                             p=self.hyperparams['p'],
                             contamination=self.hyperparams['contamination']
                             )

    def set_training_data(self, *, inputs: Inputs) -> None:
        """
        Set training data for outlier detection.

        Args:
            inputs: Container DataFrame

        Returns:
            None
        """
        super().set_training_data(inputs=inputs)

    def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
        """
        Fit the model to the training data previously supplied via set_training_data.

        Returns:
            CallResult[None]
        """
        return super().fit()

    def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
        """
        Process the testing data.

        Args:
            inputs: Container DataFrame. Time series data to run outlier detection on.

        Returns:
            Container DataFrame
            1 marks outliers, 0 marks normal.
        """
        return super().produce(inputs=inputs, timeout=timeout, iterations=iterations)

    def produce_score(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
        """
        Process the testing data.

        Args:
            inputs: Container DataFrame. Time series data to run outlier detection on.

        Returns:
            Container DataFrame
            Outlier scores of the input DataFrame.
        """
        return super().produce_score(inputs=inputs, timeout=timeout, iterations=iterations)

    def get_params(self) -> Params:
        """
        Return parameters.

        Args:
            None

        Returns:
            class Params
        """
        return super().get_params()

    def set_params(self, *, params: Params) -> None:
        """
        Set parameters for outlier detection.

        Args:
            params: class Params

        Returns:
            None
        """
        super().set_params(params=params)
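
# Sketch of the primitive's expected call sequence, kept as a comment. It
# assumes `train_df` and `test_df` are d3m container DataFrames with metadata
# (hypothetical names), and that the base class handles column selection:
#
#     primitive = TelemanomPrimitive(hyperparams=Hyperparams.defaults())
#     primitive.set_training_data(inputs=train_df)
#     primitive.fit()
#     labels = primitive.produce(inputs=test_df).value        # 1 = outlier, 0 = normal
#     scores = primitive.produce_score(inputs=test_df).value  # raw anomaly scores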
class Detector(CollectiveBaseDetector):
    """LSTM-based anomaly detector following Telemanom (Hundman et al., "Detecting
    Spacecraft Anomalies Using LSTMs and Nonparametric Dynamic Thresholding", KDD 2018).
    An LSTM is trained to predict future values of each channel; smoothed prediction
    errors are used as anomaly scores for the time series data."""

    def __init__(self, smoothing_perc=0.05, window_size=10, error_buffer=5, batch_size=30,
                 dropout=0.3, validation_split=0.2, optimizer='adam', lstm_batch_size=64,
                 loss_metric='mean_squared_error', layers=[40, 40], epochs=1, patience=10,
                 min_delta=0.0003, l_s=5, n_predictions=2, p=0.05, contamination=0.1):
        super(Detector, self).__init__(contamination=contamination,
                                       window_size=l_s,
                                       step_size=1,
                                       )
        self._smoothing_perc = smoothing_perc
        self._window_size = window_size
        self._error_buffer = error_buffer
        self._batch_size = batch_size
        self._dropout = dropout
        self._validation_split = validation_split
        self._optimizer = optimizer
        self._lstm_batch_size = lstm_batch_size
        self._loss_metric = loss_metric
        self._layers = layers
        self._epochs = epochs
        self._patience = patience
        self._min_delta = min_delta
        self._l_s = l_s
        self._n_predictions = n_predictions
        self._p = p
        self.contamination = contamination

        # self.y_hat = None
        self.results = []
        self.result_df = None
        self._model = None
        self._channel = None

    def fit(self, X, y=None):
        """
        Fit the LSTM model to the data.

        Args:
            X: ndarray of shape (n_samples, n_features)

        Returns:
            self: object with the trained model
        """
        X = check_array(X).astype(np.float64)
        self._set_n_classes(None)
        inputs = X
        self._channel = Channel(n_predictions=self._n_predictions, l_s=self._l_s)
        self._channel.shape_train_data(inputs)

        self._model = Model(self._channel,
                            patience=self._patience,
                            min_delta=self._min_delta,
                            layers=self._layers,
                            dropout=self._dropout,
                            n_predictions=self._n_predictions,
                            loss_metric=self._loss_metric,
                            optimizer=self._optimizer,
                            lstm_batch_size=self._lstm_batch_size,
                            epochs=self._epochs,
                            validation_split=self._validation_split,
                            batch_size=self._batch_size,
                            l_s=self._l_s
                            )

        self.decision_scores_, self.left_inds_, self.right_inds_ = self.decision_function(X)
        self._process_decision_scores()
        return self
    def decision_function(self, X: np.array):
        """Predict raw anomaly scores of X using the fitted detector.

        The anomaly score of an input sample is computed based on the fitted
        detector. For consistency, outliers are assigned higher anomaly scores.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples. Sparse matrices are accepted only
            if they are supported by the base estimator.

        Returns
        -------
        anomaly_scores : numpy array of shape (n_samples,)
            The anomaly score of the input samples.
        """
        X = check_array(X).astype(np.float64)
        self._set_n_classes(None)
        inputs = X
        self._channel.shape_test_data(inputs)
        self._channel = self._model.batch_predict(channel=self._channel)

        errors = Errors(channel=self._channel,
                        window_size=self._window_size,
                        batch_size=self._batch_size,
                        smoothing_perc=self._smoothing_perc,
                        n_predictions=self._n_predictions,
                        l_s=self._l_s,
                        error_buffer=self._error_buffer,
                        p=self._p
                        )

        # Smoothed prediction errors, summed across features, serve as the
        # per-window anomaly scores.
        prediction_errors = np.reshape(errors.e_s, (self._channel.X_train.shape[0], self._channel.X_train.shape[2]))
        prediction_errors = np.sum(prediction_errors, axis=1)

        left_indices = []
        right_indices = []
        scores = []
        for i in range(len(prediction_errors)):
            left_indices.append(i)
            right_indices.append(i + self._l_s)
            scores.append(prediction_errors[i])

        return np.asarray(scores), np.asarray(left_indices), np.asarray(right_indices)
# if __name__ == "__main__":
#     csv = pd.read_csv("/home/purav/Downloads/yahoo_train.csv")
#     # X_train = np.asarray(
#     #     [3., 4., 8., 16, 18, 13., 22., 36., 59., 128, 62, 67, 78, 100]).reshape(-1, 1)
#     # X_test = np.asarray(
#     #     [3., 4., 8.6, 13.4, 22.5, 17, 19.2, 36.1, 127, -23, 59.2]).reshape(-1, 1)
#     # print(X_train.shape, X_test.shape)
#
#     X_train = csv.iloc[:, [2, 3, 4, 5, 6]].values
#     clf = Detector(contamination=0.1)
#     clf.fit(X_train)
#     # pred_scores = clf.decision_function(X_test)
#     pred_labels = clf.predict(X_train)
#     print(clf.threshold_)
#     # print(np.percentile(pred_scores, 100 * 0.9))
#     # print('pred_scores: ', pred_scores)
#     print('scores: ', pred_labels[0].shape)
#     print('left_indices: ', pred_labels[1].shape)
#     print('right_indices: ', pred_labels[2].shape)
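
# A runnable variant of the scratch block above: a minimal smoke test on
# synthetic data instead of a local CSV. This is a sketch; it assumes the
# `detection_algorithm.core` telemanom modules are importable and that the
# series is long enough for the default window settings.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    # A noisy sine wave with two injected spikes as a toy univariate channel.
    X_train = np.sin(np.linspace(0, 20 * np.pi, 1000)).reshape(-1, 1) + 0.05 * rng.randn(1000, 1)
    X_train[300] += 5.0
    X_train[700] -= 5.0

    clf = Detector(contamination=0.01)
    clf.fit(X_train)
    pred_labels = clf.predict(X_train)  # (labels, left_indices, right_indices)
    print('threshold: ', clf.threshold_)
    print('labels: ', pred_labels[0].shape)
    print('left_indices: ', pred_labels[1].shape)
    print('right_indices: ', pred_labels[2].shape)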

A full-stack automated machine learning system, focused on anomaly detection for multivariate time-series data. TODS provides comprehensive modules for building machine-learning-based anomaly detection systems, including: data processing, time series processing, feature analysis, detection algorithms, and a reinforcement module. The functionality provided by these modules includes common data preprocessing, smoothing and transformation of time series, feature extraction from the time and frequency domains, and a wide variety of detection algorithms…