You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

DiscreteCosineTransform.py 20 kB

first commit Former-commit-id: 08bc23ba02cffbce3cf63962390a65459a132e48 [formerly 0795edd4834b9b7dc66db8d10d4cbaf42bbf82cb] [formerly b5010b42541add7e2ea2578bf2da537efc457757 [formerly a7ca09c2c34c4fc8b3d8e01fcfa08eeeb2cae99d]] [formerly 615058473a2177ca5b89e9edbb797f4c2a59c7e5 [formerly 743d8dfc6843c4c205051a8ab309fbb2116c895e] [formerly bb0ea98b1e14154ef464e2f7a16738705894e54b [formerly 960a69da74b81ef8093820e003f2d6c59a34974c]]] [formerly 2fa3be52c1b44665bc81a7cc7d4cea4bbf0d91d5 [formerly 2054589f0898627e0a17132fd9d4cc78efc91867] [formerly 3b53730e8a895e803dfdd6ca72bc05e17a4164c1 [formerly 8a2fa8ab7baf6686d21af1f322df46fd58c60e69]] [formerly 87d1e3a07a19d03c7d7c94d93ab4fa9f58dada7c [formerly f331916385a5afac1234854ee8d7f160f34b668f] [formerly 69fb3c78a483343f5071da4f7e2891b83a49dd18 [formerly 386086f05aa9487f65bce2ee54438acbdce57650]]]] Former-commit-id: a00aed8c934a6460c4d9ac902b9a74a3d6864697 [formerly 26fdeca29c2f07916d837883983ca2982056c78e] [formerly 0e3170d41a2f99ecf5c918183d361d4399d793bf [formerly 3c12ad4c88ac5192e0f5606ac0d88dd5bf8602dc]] [formerly d5894f84f2fd2e77a6913efdc5ae388cf1be0495 [formerly ad3e7bc670ff92c992730d29c9d3aa1598d844e8] [formerly 69fb3c78a483343f5071da4f7e2891b83a49dd18]] Former-commit-id: 3c19c9fae64f6106415fbc948a4dc613b9ee12f8 [formerly 467ddc0549c74bb007e8f01773bb6dc9103b417d] [formerly 5fa518345d958e2760e443b366883295de6d991c [formerly 3530e130b9fdb7280f638dbc2e785d2165ba82aa]] Former-commit-id: 9f5d473d42a435ec0d60149939d09be1acc25d92 [formerly be0b25c4ec2cde052a041baf0e11f774a158105d] Former-commit-id: 9eca71cb73ba9edccd70ac06a3b636b8d4093b04
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480
import logging
import math
import os
import typing
from collections import OrderedDict
from typing import cast, Dict, List, Union, Sequence, Optional, Tuple

import numpy as np
import pandas as pd
from numpy import ndarray
from scipy import sparse
from scipy.fft import dct

import common_primitives
from d3m import container, utils
from d3m.base import utils as base_utils
from d3m.exceptions import PrimitiveNotFittedError
from d3m.metadata import base as metadata_base, hyperparams
from d3m.primitive_interfaces import base, transformer
  17. __all__ = ('DiscreteCosineTransform',)
  18. Inputs = container.DataFrame
  19. Outputs = container.DataFrame
class Hyperparams(hyperparams.Hyperparams):
    # Hyperparameters for the DCT primitive. The first group maps one-to-one
    # onto scipy.fft.dct arguments; the second group ("parameters for column")
    # drives the d3m column-selection / result-combination machinery.

    # DCT type forwarded to scipy.fft.dct (scipy supports types 1-4).
    type_ = hyperparams.UniformInt(
        lower=1,
        upper=4,
        upper_inclusive = True,
        default=2,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Type of the DCT. Default is 2",
    )
    # Axis forwarded to scipy.fft.dct; -1 means the last axis.
    axis = hyperparams.Hyperparameter[int](
        default=-1,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Axis over which to compute the DCT. If not given, the last axis is used.",
    )
    # Output length: either a bounded positive int ('limit') or None
    # ('unlimited', meaning use the input length along the chosen axis).
    n = hyperparams.Union[Union[int, None]](
        configuration=OrderedDict(
            limit=hyperparams.Bounded[int](
                lower=1,
                upper=None,
                default=10,
            ),
            unlimited=hyperparams.Constant(
                default=None,
                description='If n is not given, the length of the input along the axis specified by axis is used.',
            ),
        ),
        default='unlimited',
        description='Length of the transformed axis of the output. If n is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros.',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
    )
    # Normalization mode forwarded to scipy.fft.dct.
    # NOTE(review): the doubled quotes below collapse via adjacent-string
    # concatenation, so the runtime string reads `For norm=ortho, ...`;
    # left byte-identical here.
    norm = hyperparams.Enumeration(
        values=[None,"ortho"],
        default=None,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Normalization mode. Default is None, meaning no normalization on the forward transforms and scaling by 1/n on the ifft. For norm=""ortho"", both directions are scaled by 1/sqrt(n).",
    )
    # Whether scipy may destroy the input buffer for speed.
    overwrite_x = hyperparams.UniformBool(
        default=False,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
        description="If True, the contents of x can be destroyed; the default is False. See the notes below for more details.",
    )
    # Worker count for scipy's parallel backend; None lets scipy decide.
    # NOTE(review): declared Union[float, None] while the 'limit' branch is
    # Bounded[int] — type mismatch to confirm upstream before changing.
    workers = hyperparams.Union[Union[float, None]](
        configuration=OrderedDict(
            limit=hyperparams.Bounded[int](
                lower=1,
                upper=None,
                default=10,
            ),
            unlimited=hyperparams.Constant(
                default=None,
                description='If nothing is give as a paramter',
            ),
        ),
        default='unlimited',
        description="Maximum number of workers to use for parallel computation. If negative, the value wraps around from os.cpu_count().",
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
    )
    # parameters for column
    # Explicit column indices to operate on (overrides semantic-type filtering).
    use_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
    )
    # Column indices to skip; only honored when use_columns is empty.
    exclude_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="A set of column indices to not operate on. Applicable only if \"use_columns\" is not provided.",
    )
    # How generated columns are merged back into the input dataframe.
    return_result = hyperparams.Enumeration(
        values=['append', 'replace', 'new'],
        default='new',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.",
    )
    # Toggles metadata-driven column filtering in _get_columns_to_fit.
    use_semantic_types = hyperparams.UniformBool(
        default=False,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Controls whether semantic_types metadata will be used for filtering columns in input dataframe. Setting this to false makes the code ignore return_result and will produce only the output dataframe"
    )
    add_index_columns = hyperparams.UniformBool(
        default=False,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Also include primary index columns if input data has them. Applicable only if \"return_result\" is set to \"new\".",
    )
    # Raise (sklearn-like) vs. warn when no column survives selection.
    error_on_no_input = hyperparams.UniformBool(
        default=True,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Throw an exception if no input column is selected/provided. Defaults to true to behave like sklearn. To prevent pipelines from breaking set this to False.",
    )
    # Semantic type attached to each generated coefficient column.
    return_semantic_type = hyperparams.Enumeration[str](
        values=['https://metadata.datadrivendiscovery.org/types/Attribute',
                'https://metadata.datadrivendiscovery.org/types/ConstructedAttribute'],
        default='https://metadata.datadrivendiscovery.org/types/Attribute',
        description='Decides what semantic type to attach to generated attributes',
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
    )
  118. class DCT:
  119. def __init__(self,type_,n,axis,overwrite_x,norm,workers):
  120. self._type = type_
  121. self._n = n
  122. self._axis = axis
  123. self._overwrite_x = overwrite_x
  124. self._norm = norm
  125. self._workers = workers
  126. def produce(self, inputs):
  127. dataframe = inputs
  128. processed_df = utils.pandas.DataFrame()
  129. try:
  130. for target_column in dataframe.columns :
  131. dct_input = dataframe[target_column].values
  132. dct_output = dct(x=dct_input,type=self._type,n=self._n,axis=self._axis,overwrite_x=self._overwrite_x,norm=self._norm,workers=self._workers)
  133. processed_df[target_column+"_dct_coeff"]=pd.Series(dct_output)
  134. except IndexError:
  135. logging.warning("Index not found in dataframe")
  136. return processed_df;
  137. class DiscreteCosineTransform(transformer.TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
  138. """
  139. Compute the 1-D discrete Cosine Transform.
  140. Return the Discrete Cosine Transform of arbitrary type sequence x.
  141. scipy documentation: https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html#scipy.fft.dct
  142. Parameters
  143. ----------
  144. type_: int
  145. Type of the DCT. Default is 2
  146. n: int
  147. Length of the transformed axis of the output. If n is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros.
  148. axis: int
  149. Axis over which to compute the DCT. If not given, the last axis is used.
  150. norm: str
  151. Normalization mode. Default is None, meaning no normalization on the forward transforms and scaling by 1/n on the ifft. For norm=""ortho"", both directions are scaled by 1/sqrt(n).
  152. overwrite_x: boolean
  153. If True, the contents of x can be destroyed; the default is False. See the notes below for more details.
  154. workers: int
  155. Maximum number of workers to use for parallel computation. If negative, the value wraps around from os.cpu_count(). Defualt is None.
  156. use_columns: Set
  157. A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.
  158. exclude_columns: Set
  159. A set of column indices to not operate on. Applicable only if \"use_columns\" is not provided.
  160. return_result: Enumeration
  161. Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.
  162. use_semantic_types: Bool
  163. Controls whether semantic_types metadata will be used for filtering columns in input dataframe. Setting this to false makes the code ignore return_result and will produce only the output dataframe.
  164. add_index_columns: Bool
  165. Also include primary index columns if input data has them. Applicable only if \"return_result\" is set to \"new\".
  166. error_on_no_input: Bool(
  167. Throw an exception if no input column is selected/provided. Defaults to true to behave like sklearn. To prevent pipelines from breaking set this to False.
  168. return_semantic_type: Enumeration[str](
  169. Decides what semantic type to attach to generated attributes'
  170. """
  171. __author__ = "Data Lab"
  172. metadata = metadata_base.PrimitiveMetadata(
  173. {
  174. "__author__ " : "DATA Lab at Texas A&M University",
  175. 'name': "Discrete Cosine Transform",
  176. 'python_path': 'd3m.primitives.tods.feature_analysis.discrete_cosine_transform',
  177. 'source': {
  178. 'name': 'DATA Lab at Texas A&M University',
  179. 'contact': 'mailto:khlai037@tamu.edu',
  180. 'uris': [
  181. 'https://gitlab.com/lhenry15/tods.git',
  182. 'https://gitlab.com/lhenry15/tods/-/blob/purav/anomaly-primitives/anomaly_primitives/DiscreteCosineTransform.py',
  183. ],
  184. },
  185. 'algorithm_types': [
  186. metadata_base.PrimitiveAlgorithmType.DISCRETE_COSINE_TRANSFORM,
  187. ],
  188. 'primitive_family': metadata_base.PrimitiveFamily.FEATURE_CONSTRUCTION,
  189. 'id': '584fa7d5-39cc-4cf8-8d5b-5f3a2648f767',
  190. 'hyperparameters_to_tune':['n','norm','axis','type_'],
  191. 'version': '0.0.1',
  192. },
  193. )
  194. def __init__(self, *, hyperparams: Hyperparams) -> None:
  195. super().__init__(hyperparams=hyperparams)
  196. self._clf = DCT(type_=self.hyperparams['type_'],
  197. n=self.hyperparams['n'],
  198. axis=self.hyperparams['axis'],
  199. overwrite_x=self.hyperparams['overwrite_x'],
  200. norm = self.hyperparams['norm'],
  201. workers = self.hyperparams['workers']
  202. )
  203. def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> base.CallResult[Outputs]:
  204. """
  205. Args:
  206. inputs: Container DataFrame
  207. Returns:
  208. Container DataFrame added with DCT coefficients in a column named 'column_name_dct_coeff'
  209. """
  210. assert isinstance(inputs, container.DataFrame), type(dataframe)
  211. self._fitted = False
  212. self._training_inputs, self._training_indices = self._get_columns_to_fit(inputs, self.hyperparams)
  213. self._input_column_names = self._training_inputs.columns
  214. if len(self._training_indices) > 0:
  215. # self._clf.fit(self._training_inputs)
  216. self._fitted = True
  217. else:
  218. if self.hyperparams['error_on_no_input']:
  219. raise RuntimeError("No input columns were selected")
  220. self.logger.warn("No input columns were selected")
  221. if not self._fitted:
  222. raise PrimitiveNotFittedError("Primitive not fitted.")
  223. sk_inputs = inputs
  224. if self.hyperparams['use_semantic_types']:
  225. cols = [inputs.columns[x] for x in self._training_indices]
  226. sk_inputs = container.DataFrame(data = inputs.iloc[:, self._training_indices].values,columns = cols, generate_metadata=True)
  227. output_columns = []
  228. if len(self._training_indices) > 0:
  229. sk_output = self._clf.produce(sk_inputs)
  230. if sparse.issparse(sk_output):
  231. sk_output = sk_output.toarray()
  232. outputs = self._wrap_predictions(inputs, sk_output)
  233. # if len(outputs.columns) == len(self._input_column_names):
  234. # outputs.columns = self._input_column_names
  235. output_columns = [outputs]
  236. else:
  237. if self.hyperparams['error_on_no_input']:
  238. raise RuntimeError("No input columns were selected")
  239. self.logger.warn("No input columns were selected")
  240. outputs = base_utils.combine_columns(return_result=self.hyperparams['return_result'],
  241. add_index_columns=self.hyperparams['add_index_columns'],
  242. inputs=inputs, column_indices=self._training_indices,
  243. columns_list=output_columns)
  244. return base.CallResult(outputs)
  245. @classmethod
  246. def _get_columns_to_fit(cls, inputs: Inputs, hyperparams: Hyperparams):
  247. """
  248. Select columns to fit.
  249. Args:
  250. inputs: Container DataFrame
  251. hyperparams: d3m.metadata.hyperparams.Hyperparams
  252. Returns:
  253. list
  254. """
  255. if not hyperparams['use_semantic_types']:
  256. return inputs, list(range(len(inputs.columns)))
  257. # return inputs, list(hyperparams['use_columns'])
  258. inputs_metadata = inputs.metadata
  259. def can_produce_column(column_index: int) -> bool:
  260. return cls._can_produce_column(inputs_metadata, column_index, hyperparams)
  261. columns_to_produce, columns_not_to_produce = base_utils.get_columns_to_use(inputs_metadata,
  262. use_columns=hyperparams['use_columns'],
  263. exclude_columns=hyperparams['exclude_columns'],
  264. can_use_column=can_produce_column)
  265. return inputs.iloc[:, columns_to_produce], columns_to_produce
  266. # return columns_to_produce
  267. @classmethod
  268. def _can_produce_column(cls, inputs_metadata: metadata_base.DataMetadata, column_index: int,
  269. hyperparams: Hyperparams) -> bool:
  270. """
  271. Output whether a column can be processed.
  272. Args:
  273. inputs_metadata: d3m.metadata.base.DataMetadata
  274. column_index: int
  275. Returns:
  276. bool
  277. """
  278. column_metadata = inputs_metadata.query((metadata_base.ALL_ELEMENTS, column_index))
  279. accepted_structural_types = (int, float, np.integer, np.float64,str)
  280. accepted_semantic_types = set()
  281. accepted_semantic_types.add("https://metadata.datadrivendiscovery.org/types/Attribute")
  282. if not issubclass(column_metadata['structural_type'], accepted_structural_types):
  283. print(column_index, "does not match the structural_type requirements in metadata. Skipping column")
  284. return False
  285. semantic_types = set(column_metadata.get('semantic_types', []))
  286. # print("length sematic type",len(semantic_types))
  287. # returing true for testing purposes for custom dataframes
  288. return True;
  289. if len(semantic_types) == 0:
  290. cls.logger.warning("No semantic types found in column metadata")
  291. return False
  292. # Making sure all accepted_semantic_types are available in semantic_types
  293. if len(accepted_semantic_types - semantic_types) == 0:
  294. return True
  295. print(semantic_types)
  296. return False
  297. @classmethod
  298. def _get_target_columns_metadata(cls, outputs_metadata: metadata_base.DataMetadata, hyperparams) -> List[OrderedDict]:
  299. """
  300. Output metadata of selected columns.
  301. Args:
  302. outputs_metadata: metadata_base.DataMetadata
  303. hyperparams: d3m.metadata.hyperparams.Hyperparams
  304. Returns:
  305. d3m.metadata.base.DataMetadata
  306. """
  307. outputs_length = outputs_metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']
  308. target_columns_metadata: List[OrderedDict] = []
  309. for column_index in range(outputs_length):
  310. column_metadata = OrderedDict(outputs_metadata.query_column(column_index))
  311. # Update semantic types and prepare it for predicted targets.
  312. semantic_types = set(column_metadata.get('semantic_types', []))
  313. semantic_types_to_remove = set([])
  314. add_semantic_types = []
  315. add_semantic_types.add(hyperparams["return_semantic_type"])
  316. semantic_types = semantic_types - semantic_types_to_remove
  317. semantic_types = semantic_types.union(add_semantic_types)
  318. column_metadata['semantic_types'] = list(semantic_types)
  319. target_columns_metadata.append(column_metadata)
  320. return target_columns_metadata
  321. @classmethod
  322. def _update_predictions_metadata(cls, inputs_metadata: metadata_base.DataMetadata, outputs: Optional[Outputs],
  323. target_columns_metadata: List[OrderedDict]) -> metadata_base.DataMetadata:
  324. """
  325. Updata metadata for selected columns.
  326. Args:
  327. inputs_metadata: metadata_base.DataMetadata
  328. outputs: Container Dataframe
  329. target_columns_metadata: list
  330. Returns:
  331. d3m.metadata.base.DataMetadata
  332. """
  333. outputs_metadata = metadata_base.DataMetadata().generate(value=outputs)
  334. for column_index, column_metadata in enumerate(target_columns_metadata):
  335. column_metadata.pop("structural_type", None)
  336. outputs_metadata = outputs_metadata.update_column(column_index, column_metadata)
  337. return outputs_metadata
  338. def _wrap_predictions(self, inputs: Inputs, predictions: ndarray) -> Outputs:
  339. """
  340. Wrap predictions into dataframe
  341. Args:
  342. inputs: Container Dataframe
  343. predictions: array-like data (n_samples, n_features)
  344. Returns:
  345. Dataframe
  346. """
  347. outputs = container.DataFrame(predictions, generate_metadata=True)
  348. target_columns_metadata = self._add_target_columns_metadata(outputs.metadata,self.hyperparams)
  349. outputs.metadata = self._update_predictions_metadata(inputs.metadata, outputs, target_columns_metadata)
  350. # print(outputs.metadata.to_internal_simple_structure())
  351. return outputs
  352. @classmethod
  353. def _add_target_columns_metadata(cls, outputs_metadata: metadata_base.DataMetadata, hyperparams):
  354. """
  355. Add target columns metadata
  356. Args:
  357. outputs_metadata: metadata.base.DataMetadata
  358. hyperparams: d3m.metadata.hyperparams.Hyperparams
  359. Returns:
  360. List[OrderedDict]
  361. """
  362. outputs_length = outputs_metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']
  363. target_columns_metadata: List[OrderedDict] = []
  364. for column_index in range(outputs_length):
  365. # column_name = "output_{}".format(column_index)
  366. column_metadata = OrderedDict()
  367. semantic_types = set()
  368. semantic_types.add(hyperparams["return_semantic_type"])
  369. column_metadata['semantic_types'] = list(semantic_types)
  370. # column_metadata["name"] = str(column_name)
  371. target_columns_metadata.append(column_metadata)
  372. return target_columns_metadata
# NOTE(review): no-op self-assignment kept from the original; presumably it
# was meant to attach an extended/shared docstring — TODO confirm before removing.
DiscreteCosineTransform.__doc__ = DiscreteCosineTransform.__doc__

全栈的自动化机器学习系统,主要针对多变量时间序列数据的异常检测。TODS提供了详尽的用于构建基于机器学习的异常检测系统的模块,它们包括:数据处理(data processing),时间序列处理( time series processing),特征分析(feature analysis),检测算法(detection algorithms),和强化模块( reinforcement module)。这些模块所提供的功能包括常见的数据预处理、时间序列数据的平滑或变换,从时域或频域中抽取特征、多种多样的检测算