You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

modeling.py 7.1 kB

first commit Former-commit-id: 08bc23ba02cffbce3cf63962390a65459a132e48 [formerly 0795edd4834b9b7dc66db8d10d4cbaf42bbf82cb] [formerly b5010b42541add7e2ea2578bf2da537efc457757 [formerly a7ca09c2c34c4fc8b3d8e01fcfa08eeeb2cae99d]] [formerly 615058473a2177ca5b89e9edbb797f4c2a59c7e5 [formerly 743d8dfc6843c4c205051a8ab309fbb2116c895e] [formerly bb0ea98b1e14154ef464e2f7a16738705894e54b [formerly 960a69da74b81ef8093820e003f2d6c59a34974c]]] [formerly 2fa3be52c1b44665bc81a7cc7d4cea4bbf0d91d5 [formerly 2054589f0898627e0a17132fd9d4cc78efc91867] [formerly 3b53730e8a895e803dfdd6ca72bc05e17a4164c1 [formerly 8a2fa8ab7baf6686d21af1f322df46fd58c60e69]] [formerly 87d1e3a07a19d03c7d7c94d93ab4fa9f58dada7c [formerly f331916385a5afac1234854ee8d7f160f34b668f] [formerly 69fb3c78a483343f5071da4f7e2891b83a49dd18 [formerly 386086f05aa9487f65bce2ee54438acbdce57650]]]] Former-commit-id: a00aed8c934a6460c4d9ac902b9a74a3d6864697 [formerly 26fdeca29c2f07916d837883983ca2982056c78e] [formerly 0e3170d41a2f99ecf5c918183d361d4399d793bf [formerly 3c12ad4c88ac5192e0f5606ac0d88dd5bf8602dc]] [formerly d5894f84f2fd2e77a6913efdc5ae388cf1be0495 [formerly ad3e7bc670ff92c992730d29c9d3aa1598d844e8] [formerly 69fb3c78a483343f5071da4f7e2891b83a49dd18]] Former-commit-id: 3c19c9fae64f6106415fbc948a4dc613b9ee12f8 [formerly 467ddc0549c74bb007e8f01773bb6dc9103b417d] [formerly 5fa518345d958e2760e443b366883295de6d991c [formerly 3530e130b9fdb7280f638dbc2e785d2165ba82aa]] Former-commit-id: 9f5d473d42a435ec0d60149939d09be1acc25d92 [formerly be0b25c4ec2cde052a041baf0e11f774a158105d] Former-commit-id: 9eca71cb73ba9edccd70ac06a3b636b8d4093b04
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205
  1. from keras.models import Sequential, load_model
  2. from keras.callbacks import History, EarlyStopping, Callback
  3. from keras.layers.recurrent import LSTM
  4. from keras.layers.core import Dense, Activation, Dropout
  5. from keras.layers import Flatten
  6. import numpy as np
  7. import os
  8. import logging
  9. # suppress tensorflow CPU speedup warnings
  10. os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
  11. logger = logging.getLogger('telemanom')
  12. class Model:
  13. def __init__(self, channel,patience,min_delta,layers,dropout,n_predictions,loss_metric,
  14. optimizer,lstm_batch_size,epochs,validation_split,batch_size,l_s
  15. ):
  16. """
  17. Loads/trains RNN and predicts future telemetry values for a channel.
  18. Args:
  19. config (obj): Config object containing parameters for processing
  20. and model training
  21. run_id (str): Datetime referencing set of predictions in use
  22. channel (obj): Channel class object containing train/test data
  23. for X,y for a single channel
  24. Attributes:
  25. config (obj): see Args
  26. chan_id (str): channel id
  27. run_id (str): see Args
  28. y_hat (arr): predicted channel values
  29. model (obj): trained RNN model for predicting channel values
  30. """
  31. # self.config = config
  32. # self.chan_id = channel.id
  33. # self.run_id = run_id
  34. self.y_hat = np.array([])
  35. self.model = None
  36. # self.save()
  37. self._patience = patience
  38. self._min_delta = min_delta
  39. self._layers = layers
  40. self._dropout = dropout
  41. self._n_predictions = n_predictions
  42. self._loss_metric = loss_metric
  43. self._optimizer = optimizer
  44. self._lstm_batch_size = lstm_batch_size
  45. self._epochs = epochs
  46. self._validation_split = validation_split
  47. self._batch_size = batch_size
  48. self._l_s = l_s
  49. self.train_new(channel)
  50. # def load(self):
  51. # """
  52. # Load model for channel.
  53. # """
  54. # logger.info('Loading pre-trained model')
  55. # self.model = load_model(os.path.join('data', self.config.use_id,
  56. # 'models', self.chan_id + '.h5'))
  57. def train_new(self, channel):
  58. """
  59. Train LSTM model according to specifications in config.yaml.
  60. Args:
  61. channel (obj): Channel class object containing train/test data
  62. for X,y for a single channel
  63. """
  64. cbs = [History(), EarlyStopping(monitor='val_loss',
  65. patience=self._patience,
  66. min_delta=self._min_delta,
  67. verbose=1)]
  68. self.model = Sequential()
  69. self.model.add(LSTM(
  70. self._layers[0],
  71. input_shape=(None, channel.X_train.shape[2]),
  72. return_sequences=True))
  73. self.model.add(Dropout(self._dropout))
  74. self.model.add(LSTM(
  75. self._layers[1],
  76. return_sequences=False))
  77. self.model.add(Dropout(self._dropout))
  78. self.model.add(Dense(
  79. self._n_predictions
  80. *channel.X_train.shape[2]
  81. ))
  82. self.model.add(Activation('linear'))
  83. self.model.compile(loss=self._loss_metric,
  84. optimizer=self._optimizer)
  85. # print(self.model.summary())
  86. self.model.fit(channel.X_train,
  87. channel.y_train,
  88. batch_size=self._lstm_batch_size,
  89. epochs=self._epochs,
  90. validation_split=self._validation_split,
  91. callbacks=cbs,
  92. verbose=True)
  93. # def save(self):
  94. # """
  95. # Save trained model.
  96. # """
  97. # self.model.save(os.path.join('data', self.run_id, 'models',
  98. # '{}.h5'.format(self.chan_id)))
  99. def aggregate_predictions(self, y_hat_batch, method='mean'):
  100. """
  101. Aggregates predictions for each timestep. When predicting n steps
  102. ahead where n > 1, will end up with multiple predictions for a
  103. timestep.
  104. Args:
  105. y_hat_batch (arr): predictions shape (<batch length>, <n_preds)
  106. method (string): indicates how to aggregate for a timestep - "first"
  107. or "mean"
  108. """
  109. agg_y_hat_batch = np.array([])
  110. for t in range(len(y_hat_batch)):
  111. start_idx = t - self._n_predictions
  112. start_idx = start_idx if start_idx >= 0 else 0
  113. # predictions pertaining to a specific timestep lie along diagonal
  114. y_hat_t = np.flipud(y_hat_batch[start_idx:t+1]).diagonal()
  115. if method == 'first':
  116. agg_y_hat_batch = np.append(agg_y_hat_batch, [y_hat_t[0]])
  117. elif method == 'mean':
  118. agg_y_hat_batch = np.append(agg_y_hat_batch, np.mean(y_hat_t))
  119. agg_y_hat_batch = agg_y_hat_batch.reshape(len(agg_y_hat_batch), 1)
  120. self.y_hat = np.append(self.y_hat, agg_y_hat_batch)
  121. def batch_predict(self, channel):
  122. """
  123. Used trained LSTM model to predict test data arriving in batches.
  124. Args:
  125. channel (obj): Channel class object containing train/test data
  126. for X,y for a single channel
  127. Returns:
  128. channel (obj): Channel class object with y_hat values as attribute
  129. """
  130. # num_batches = int((y_test.shape[0] - self._l_s)
  131. # / self._batch_size)
  132. # if num_batches < 0:
  133. # raise ValueError('l_s ({}) too large for stream length {}.'
  134. # .format(self._l_s, y_test.shape[0]))
  135. # # simulate data arriving in batches, predict each batch
  136. # for i in range(0, num_batches + 1):
  137. # prior_idx = i * self._batch_size
  138. # idx = (i + 1) * self._batch_size
  139. # if i + 1 == num_batches + 1:
  140. # # remaining values won't necessarily equal batch size
  141. # idx = y_test.shape[0]
  142. # X_test_batch = X_test[prior_idx:idx]
  143. # y_hat_batch = self.model.predict(X_test_batch)
  144. # y_hat_batch = np.reshape(y_hat_batch,(X_test.shape[0],self._n_predictions,X_test.shape[2]))
  145. # # print("PREDICTIONS",y_hat_batch.shape)
  146. # self.aggregate_predictions(y_hat_batch)
  147. # self.y_hat = np.reshape(self.y_hat, (self.y_hat.size,))
  148. # channel.y_hat = self.y_hat
  149. # # np.save(os.path.join('data', self.run_id, 'y_hat', '{}.npy'
  150. # # .format(self.chan_id)), self.y_hat)
  151. # return channel
  152. self.y_hat = self.model.predict(channel.X_test)
  153. self.y_hat = np.reshape(self.y_hat,(channel.X_test.shape[0],self._n_predictions,channel.X_test.shape[2]))
  154. # print("shape before ",self.y_hat.shape)
  155. channel.y_hat = self.y_hat
  156. return channel

全栈的自动化机器学习系统,主要针对多变量时间序列数据的异常检测。TODS提供了详尽的用于构建基于机器学习的异常检测系统的模块,它们包括:数据处理(data processing),时间序列处理( time series processing),特征分析(feature analysis),检测算法(detection algorithms),和强化模块( reinforcement module)。这些模块所提供的功能包括常见的数据预处理、时间序列数据的平滑或变换,从时域或频域中抽取特征、多种多样的检测算