Former-commit-id:mastere74f79855b[formerly75f79cbfde] [formerlya7ba37c144[formerlyefaaff5c77]] [formerlyf64c651805[formerly1e2e6b28e7] [formerly7726d60297[formerly61ee710811]]] [formerly87600023f7[formerlye8b46a964b] [formerly7dae957365[formerly99de894018]] [formerly9dc849c1be[formerlybb69dcb687] [formerly0b58785800[formerlydd3e068024]]]] [formerlyb0b178e9f7[formerly6573ce4e51] [formerly92cd821b55[formerlyeffe734de1]] [formerlyfb798b09c0[formerly0eb9a5fc82] [formerlyb2e75d16f0[formerly7c7db98c86]]] [formerly1b29895bf1[formerlyc2aec31f57] [formerly02c5bbae3b[formerly55d5908b63]] [formerly6ae132e87c[formerlye187b62996] [formerlybe49170dea[formerly8c9408b1d9]]]]] [formerly3b99321094[formerly946346f187] [formerly032beab816[formerly384f339f29]] [formerly69b390b59c[formerly40782f2973] [formerly1308774ad8[formerly4638da6f49]]] [formerly9ff1a65be4[formerlye0943627a2] [formerlyca86bde92f[formerly171d1ee438]] [formerly3a1ede9b17[formerly9ed521c461] [formerly512244e5a5[formerlya8d3a46ddf]]]] [formerly2282882307[formerly7bed7631e3] [formerlybd601f44ac[formerlya749ed909a]] [formerlyc7d0981fca[formerlyd8238ca2c4] [formerly07bb60cdcd[formerlyb1c9adbf16]]] [formerly83c0a222d2[formerly8591c568bf] [formerly8b4c3b0fbb[formerlybb59cbedab]] [formerlyd6d19caab2[formerly9b5f870edd] [formerlya9816137c9[formerlyf7e6aadb4c]]]]]] Former-commit-id:b329e094a9[formerly7442cf14e2] [formerlye8209d2231[formerly7cb6d0a12e]] [formerly1ef96ea153[formerlyd9630d0834] [formerly32e083435e[formerly22e3c77c44]]] [formerly75bae6862d[formerly0bce192abd] [formerly0291c01c9d[formerlya40e19ad0b]] [formerly3baa515f14[formerlyf1ec922137] [formerlyf995326947[formerlyf71380f8e2]]]] [formerlyc48085177e[formerlye54827a2ce] [formerly3d01618bf3[formerly3cd4aee5b6]] [formerly7df4fa74bf[formerlyc0fb14fdd2] [formerly2b06a008a6[formerly3e302ae408]]] [formerly6efaa48f31[formerlyde277037b5] [formerly4561716aab[formerly9c8af6e8c7]] [formerlyf7cf457399[formerlydbdedf45ca] [formerlya9816137c9]]]] 
Former-commit-id:a8b47fc969[formerly6ba8ac6512] [formerly280f9bded8[formerly46b53d9ba0]] [formerly38c84534a4[formerlyc5433976eb] [formerly864f47618b[formerlyb9452de098]]] [formerly967ed9c582[formerlyf1cf7a553c] [formerlyb76f5fe2df[formerly2b0b040ac1]] [formerly84a2d919e4[formerly922b076818] [formerly14c6dcecfa[formerly6562548a85]]]] Former-commit-id:d47b7f4ab3[formerlyd645d111bc] [formerlyc4e7c84376[formerlycd9cb43806]] [formerly3b4cfbe2ba[formerlyc470dfcf89] [formerlya576c30418[formerly6fd906b1f4]]] Former-commit-id:19d79a6bf2[formerlya51be09677] [formerly1822886f7b[formerly118376e49d]] Former-commit-id:a595abfec4[formerlyf39340aca2] Former-commit-id:789af07c07
# Build a TODS anomaly-detection pipeline:
# dataset_to_dataframe -> column_parser -> extract attrs/targets -> imputer -> ABOD,
# then serialize the pipeline description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: ABOD (angle-based outlier detection)
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_abod'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step_5.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_5.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_5.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 4,))
step_5.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='replace')
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
# Build a TODS anomaly-detection pipeline using a PyOD auto-encoder detector,
# then serialize the pipeline description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest
#                            extract_columns_by_semantic_types(targets)                     -> ^

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: auto encoder
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_ae'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
# Build a TODS pipeline: standard-scale selected columns, then run the
# AutoRegODetector anomaly detector; serialize the description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import numpy as np

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest
#                            extract_columns_by_semantic_types(targets)                     -> ^

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: Standardization
primitive_3 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(1,2,3,4,5,))
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Step 4: test primitive (AutoRegODetector)
primitive_4 = index.get_primitive('d3m.primitives.tods.detection_algorithm.AutoRegODetector')
step_4 = PrimitiveStep(primitive=primitive_4)
step_4.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_4.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=10)
# step_4.add_hyperparameter(name='weights', argument_type=ArgumentType.VALUE, data=weights_ndarray)
step_4.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=False)
# step_4.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6))  # There is sth wrong with multi-dimensional
step_4.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_4.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True)
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.3.produce')
step_4.add_output('produce')
step_4.add_output('produce_score')
pipeline_description.add_step(step_4)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.4.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
# Build a TODS pipeline that applies axis-wise scaling to column 2, then
# serialize the pipeline description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import copy

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest
#                            extract_columns_by_semantic_types(targets)                     -> ^

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: test primitive (axiswise_scaler)
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
# Build a TODS pipeline that applies the Baxter-King (BK) band-pass filter to
# columns 2-3, then serialize the pipeline description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: BKFilter
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.bk_filter'))
# step_2.add_hyperparameter(name = 'columns_using_method', argument_type=ArgumentType.VALUE, data = 'name')
step_2.add_hyperparameter(name = 'use_semantic_types', argument_type=ArgumentType.VALUE, data = True)
step_2.add_hyperparameter(name = 'use_columns', argument_type=ArgumentType.VALUE, data = (2,3))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
# Build a TODS anomaly-detection pipeline using the PyOD CBLOF detector on
# column 2, then serialize the pipeline description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import copy

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest
#                            extract_columns_by_semantic_types(targets)                     -> ^

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: test primitive (pyod_cblof)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_cblof')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # There is sth wrong with multi-dimensional
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
# Build a TODS pipeline that binarizes the categorical column 3, then
# serialize the pipeline description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest
#                            extract_columns_by_semantic_types(targets)                     -> ^

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: Column Parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: Categorical to Binary
primitive_2 = index.get_primitive('d3m.primitives.tods.data_processing.categorical_to_binary')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(3,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
# Build a TODS pipeline: auto-correlation features on columns 2-3 followed by
# column filtering; serialize the pipeline description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest
#                            extract_columns_by_semantic_types(targets)                     -> ^

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
# NOTE(review): this script uses the common d3m primitive here rather than the
# TODS one used by the sibling scripts — presumably intentional, worth confirming.
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: auto_correlation
primitive_2 = index.get_primitive('d3m.primitives.tods.feature_analysis.auto_correlation')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name="use_semantic_types", argument_type=ArgumentType.VALUE, data = True)
step_2.add_hyperparameter(name="use_columns", argument_type=ArgumentType.VALUE, data = (2, 3))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: column_filter
primitive_3 = index.get_primitive('d3m.primitives.tods.data_processing.column_filter')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
# Build a TODS pipeline that validates time-series continuity (filling gaps by
# imputation), then serialize the pipeline description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: ContinuityValidation
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.continuity_validation'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name = 'continuity_option', argument_type=ArgumentType.VALUE, data = 'imputation')
step_2.add_hyperparameter(name = 'interval', argument_type=ArgumentType.VALUE, data = 0.3)
# Or:
# step_2.add_hyperparameter(name = 'continuity_option', argument_type=ArgumentType.VALUE, data = 'ablation')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
# Build a TODS anomaly-detection pipeline using the DeepLog detector on
# column 2, then serialize the pipeline description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest
#                            extract_columns_by_semantic_types(targets)                     -> ^

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe (common d3m primitive, unlike the sibling scripts)
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: test primitive (deeplog)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.deeplog')
step_2 = PrimitiveStep(primitive=primitive_2)
# step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # There is sth wrong with multi-dimensional
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
# Build a TODS pipeline that applies a discrete cosine transform to columns
# 2-4, then serialize the pipeline description to pipeline.yml.
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest
#                            extract_columns_by_semantic_types(targets)                     -> ^

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: Column Parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: Discrete Cosine Transform
primitive_2 = index.get_primitive('d3m.primitives.tods.feature_analysis.discrete_cosine_transform')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output json
# data = pipeline_description.to_json()
| @@ -1,42 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: DuplicationValidation | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.duplication_validation')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name = 'keep_option', argument_type=ArgumentType.VALUE, data = 'average') # Or: 'first' | |||
| pipeline_description.add_step(step_2) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,48 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: Column Parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: Fast Fourier Transform | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.feature_analysis.fast_fourier_transform') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,68 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| data=['https://metadata.datadrivendiscovery.org/types/TrueTarget']) | |||
| pipeline_description.add_step(step_3) | |||
| attributes = 'steps.2.produce' | |||
| targets = 'steps.3.produce' | |||
| # Step 4: imputer | |||
| step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing')) | |||
| step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes) | |||
| step_4.add_output('produce') | |||
| pipeline_description.add_step(step_4) | |||
| # Step 5: HBOS | |||
| step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_hbos')) | |||
| step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce') | |||
| step_5.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1) | |||
| # step_5.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_5.add_output('produce') | |||
| pipeline_description.add_step(step_5) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,71 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| data=['https://metadata.datadrivendiscovery.org/types/TrueTarget']) | |||
| pipeline_description.add_step(step_3) | |||
| attributes = 'steps.2.produce' | |||
| targets = 'steps.3.produce' | |||
| # Step 4: imputer | |||
| step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing')) | |||
| step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes) | |||
| step_4.add_output('produce') | |||
| pipeline_description.add_step(step_4) | |||
| # Step 5: HBOS | |||
| step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_hbos')) | |||
| step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce') | |||
| step_5.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1) | |||
| step_5.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True) | |||
| # step_5.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_5.add_output('produce_score') | |||
| step_5.add_output('produce') | |||
| pipeline_description.add_step(step_5) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce') | |||
| # pipeline_description.add_output(name='output score', data_reference='steps.5.produce_score') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,46 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: HPFilter | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.hp_filter')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name = 'use_columns', argument_type=ArgumentType.VALUE, data = [2,3,6]) | |||
| step_2.add_hyperparameter(name = 'use_semantic_types', argument_type=ArgumentType.VALUE, data = True) | |||
| step_2.add_hyperparameter(name = 'return_result', argument_type=ArgumentType.VALUE, data = 'append') | |||
| pipeline_description.add_step(step_2) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,76 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| data=['https://metadata.datadrivendiscovery.org/types/TrueTarget']) | |||
| pipeline_description.add_step(step_3) | |||
| attributes = 'steps.2.produce' | |||
| targets = 'steps.3.produce' | |||
| # Step 4: imputer | |||
| step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing')) | |||
| step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes) | |||
| step_4.add_output('produce') | |||
| pipeline_description.add_step(step_4) | |||
| # Step 5: holt smoothing | |||
| step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.holt_smoothing')) | |||
| step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes) | |||
| step_5.add_hyperparameter(name="exclude_columns", argument_type=ArgumentType.VALUE, data = (2, 3)) | |||
| step_5.add_hyperparameter(name="use_semantic_types", argument_type=ArgumentType.VALUE, data = True) | |||
| step_5.add_output('produce') | |||
| pipeline_description.add_step(step_5) | |||
| # Step 6: isolation forest | |||
| #step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.anomaly_detection.isolation_forest.Algorithm')) | |||
| #step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce') | |||
| #step_6.add_argument(name='outputs', argument_type=ArgumentType.CONTAINER, data_reference=targets) | |||
| #step_6.add_output('produce') | |||
| #pipeline_description.add_step(step_6) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,76 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| data=['https://metadata.datadrivendiscovery.org/types/TrueTarget']) | |||
| pipeline_description.add_step(step_3) | |||
| attributes = 'steps.2.produce' | |||
| targets = 'steps.3.produce' | |||
| # Step 4: imputer | |||
| step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing')) | |||
| step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes) | |||
| step_4.add_output('produce') | |||
| pipeline_description.add_step(step_4) | |||
| # Step 5: holt winters exponential smoothing | |||
| step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.holt_winters_exponential_smoothing')) | |||
| step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes) | |||
| step_5.add_hyperparameter(name="use_columns", argument_type=ArgumentType.VALUE, data = (2, 3)) | |||
| step_5.add_hyperparameter(name="use_semantic_types", argument_type=ArgumentType.VALUE, data = True) | |||
| step_5.add_output('produce') | |||
| pipeline_description.add_step(step_5) | |||
| # Step 6: isolation forest | |||
| #step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.anomaly_detection.isolation_forest.Algorithm')) | |||
| #step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce') | |||
| #step_6.add_argument(name='outputs', argument_type=ArgumentType.CONTAINER, data_reference=targets) | |||
| #step_6.add_output('produce') | |||
| #pipeline_description.add_step(step_6) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,59 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| import copy | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_iforest') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1) | |||
| # step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| # step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce_score') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.3.produce_score') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,71 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| import numpy as np | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: Standardization | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(1,2,3,4,5,)) | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # # Step 4: test primitive | |||
| primitive_4 = index.get_primitive('d3m.primitives.tods.detection_algorithm.KDiscordODetector') | |||
| step_4 = PrimitiveStep(primitive=primitive_4) | |||
| step_4.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1) | |||
| step_4.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=10) | |||
| # step_4.add_hyperparameter(name='weights', argument_type=ArgumentType.VALUE, data=weights_ndarray) | |||
| step_4.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=False) | |||
| # step_4.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) # There is sth wrong with multi-dimensional | |||
| step_4.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_4.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True) | |||
| step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.3.produce') | |||
| step_4.add_output('produce') | |||
| step_4.add_output('produce_score') | |||
| pipeline_description.add_step(step_4) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.4.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,51 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| import copy | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: test primitive | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_knn') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,)) # There is sth wrong with multi-dimensional | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,51 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS anomaly-detection pipeline:
#   dataset_to_dataframe -> column_parser -> pyod_loda
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: LODA anomaly detector on column 2, appending its result.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_loda'))
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
# NOTE(review): multi-column tuples reportedly misbehave here -- single column only.
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
# ===== next pipeline script (former diff hunk marker) =====
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS anomaly-detection pipeline:
#   dataset_to_dataframe -> column_parser -> pyod_lof
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: Local Outlier Factor detector on column 2, appending its result.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_lof'))
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
# NOTE(review): multi-column tuples reportedly misbehave here -- single column only.
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
# ===== next pipeline script (former diff hunk marker) =====
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS anomaly-detection pipeline:
#   dataset_to_dataframe -> column_parser -> extract_columns(attributes)
#   -> standard_scaler -> LSTMODetector
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: keep only the attribute columns.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: standardize columns 1-5, producing a new DataFrame.
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler'))
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(1, 2, 3, 4, 5,))
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Step 4: LSTM-based outlier detector over the scaled attributes.
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.LSTMODetector'))
step_4.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_4.add_hyperparameter(name='diff_group_method', argument_type=ArgumentType.VALUE, data='average')
step_4.add_hyperparameter(name='feature_dim', argument_type=ArgumentType.VALUE, data=5)
step_4.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=False)
step_4.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_4.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True)
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.3.produce')
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.4.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
# ===== next pipeline script (former diff hunk marker) =====
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS anomaly-detection pipeline:
#   dataset_to_dataframe -> column_parser -> matrix_profile
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: matrix-profile detector on columns 2-4 with a window of 3.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.matrix_profile'))
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4))
step_2.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=3)
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
# ===== next pipeline script (former diff hunk marker) =====
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS pipeline:
#   dataset_to_dataframe -> column_parser -> extract_columns(attributes)
#   -> impute_missing -> moving_average_transform
# (extract_columns(targets) is kept for a future supervised detector step.)
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract attribute columns.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract target columns (not consumed downstream yet).
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: impute missing values in the attribute columns.
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: moving-average transform on columns 2-3.
# FIX: consume the imputer's output ('steps.4.produce'); the original fed the
# raw attributes straight in, which left step 4's result unused.
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.moving_average_transform'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step_5.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3))
step_5.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# A detector step (e.g. isolation forest reading `targets`) can be appended here later.

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
# ===== next pipeline script (former diff hunk marker) =====
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS feature-analysis pipeline:
#   dataset_to_dataframe -> column_parser -> non_negative_matrix_factorization
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: rank-5 non-negative matrix factorization of column 2.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.non_negative_matrix_factorization'))
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_hyperparameter(name='rank', argument_type=ArgumentType.VALUE, data=5)
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
# ===== next pipeline script (former diff hunk marker) =====
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS anomaly-detection pipeline:
#   dataset_to_dataframe -> column_parser -> pyod_ocsvm
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: one-class SVM detector on column 2, appending its result.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_ocsvm'))
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
# NOTE(review): multi-column tuples reportedly misbehave here -- single column only.
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
# ===== next pipeline script (former diff hunk marker) =====
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS anomaly-detection pipeline:
#   dataset_to_dataframe -> column_parser -> extract_columns(attributes)
#   -> standard_scaler -> PCAODetector
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
# NOTE(review): this script uses the common d3m primitive path, unlike the
# sibling scripts which use the tods one -- confirm this is intentional.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: keep only the attribute columns.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: standardize columns 1-5, producing a new DataFrame.
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler'))
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(1, 2, 3, 4, 5,))
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Step 4: PCA-based outlier detector with a window of 10; also exposes scores.
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.PCAODetector'))
step_4.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_4.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=10)
step_4.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=False)
step_4.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_4.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True)
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.3.produce')
step_4.add_output('produce')
step_4.add_output('produce_score')
pipeline_description.add_step(step_4)

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.4.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
# ===== next pipeline script (former diff hunk marker) =====
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS time-series processing pipeline:
#   dataset_to_dataframe -> column_parser -> power_transformer
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: power transform of column 2, appending the result.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.power_transformer'))
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
# ===== next pipeline script (former diff hunk marker) =====
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS anomaly-detection pipeline:
#   dataset_to_dataframe -> column_parser -> pyod_cof
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: Connectivity-based Outlier Factor detector on columns 2-4.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_cof'))
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
# ===== next pipeline script (former diff hunk marker) =====
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Build a TODS time-series processing pipeline:
#   dataset_to_dataframe -> column_parser -> quantile_transformer
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the input Dataset container into a DataFrame.
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse string columns into their proper structural types.
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: quantile transform of column 2, appending the result.
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.quantile_transformer'))
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output of the pipeline.
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize the pipeline description to YAML and persist it.
yaml_text = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)

# JSON is also available: pipeline_description.to_json()
| @@ -1,54 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser
#   -> extract_columns_by_semantic_types(attributes) -> rule_filter.
pipeline = Pipeline()
pipeline.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
pipeline.add_step(step)

# Step 1: parse the column types.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
pipeline.add_step(step)

# Step 2: keep only the attribute columns.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
step.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline.add_step(step)

# Step 3: rule-based filter over columns 2 and 4; result appended.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.reinforcement.rule_filter'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step.add_output('produce')
step.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 4,))
step.add_hyperparameter(name='rule', argument_type=ArgumentType.VALUE, data='#4# % 2 == 0 and #2# <= 0.3')
step.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
pipeline.add_step(step)

# Expose the filter's output as the pipeline result.
pipeline.add_output(name='output predictions', data_reference='steps.3.produce')

# Serialize to YAML (pipeline.to_json() would give JSON instead).
yaml_text = pipeline.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)
| @@ -1,49 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams

# Pipeline: dataset_to_dataframe -> column_parser -> pyod_sod detector.
pipeline = Pipeline()
pipeline.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
pipeline.add_step(step)

# Step 1: parse the column types.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
pipeline.add_step(step)

# Step 2: SOD outlier detection on columns 2-4; scores appended.
# NOTE(review): original author flagged a problem with multi-dimensional
# use_columns here — verify before relying on columns (2,3,4).
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_sod'))
step.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4))
step.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
pipeline.add_step(step)

# Expose the detector's output as the pipeline result.
pipeline.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize to YAML (pipeline.to_json() would give JSON instead).
yaml_text = pipeline.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)
| @@ -1,76 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser
#   -> extract attributes / extract targets -> imputer
#   -> simple_exponential_smoothing.
pipeline = Pipeline()
pipeline.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
pipeline.add_step(step)

# Step 1: parse the column types.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
pipeline.add_step(step)

# Step 2: attribute columns only.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
step.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                        data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline.add_step(step)

# Step 3: true-target columns, taken from the unparsed dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
step.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                        data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline.add_step(step)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'  # consumed only by the (removed) estimator step

# Step 4: imputer.
# NOTE(review): steps.4.produce is never consumed — step 5 reads the raw
# attributes instead. Confirm whether smoothing should use the imputed data.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step.add_output('produce')
pipeline.add_step(step)

# Step 5: simple exponential smoothing on column 1.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.simple_exponential_smoothing'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(1,))
step.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step.add_output('produce')
pipeline.add_step(step)

# (A trailing isolation-forest step existed here only as commented-out code.)

# Expose the smoothed output as the pipeline result.
pipeline.add_output(name='output predictions', data_reference='steps.5.produce')

# Serialize to YAML (pipeline.to_json() would give JSON instead).
yaml_text = pipeline.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)
| @@ -1,49 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import copy

# Pipeline: dataset_to_dataframe -> column_parser -> standard_scaler.
pipeline = Pipeline()
pipeline.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
pipeline.add_step(step)

# Step 1: parse the column types.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
pipeline.add_step(step)

# Step 2: standard-scale column 2; result appended to the dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler'))
step.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
pipeline.add_step(step)

# Expose step 2's produce output as the pipeline result.
pipeline.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize to YAML (pipeline.to_json() would give JSON instead).
yaml_text = pipeline.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)
| @@ -1,44 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> TRMF feature analysis.
pipeline = Pipeline()
pipeline.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
pipeline.add_step(step)

# Step 1: parse the column types.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
pipeline.add_step(step)

# Step 2: temporal regularized matrix factorization with the given lag set.
# Other hyperparameters (e.g. 'K', 'use_columns') are left at their defaults.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.trmf'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
step.add_hyperparameter(name='lags', argument_type=ArgumentType.VALUE, data=[1,2,10,100])
pipeline.add_step(step)

# Expose step 2's produce output as the pipeline result.
pipeline.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize to YAML (pipeline.to_json() would give JSON instead).
yaml_text = pipeline.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)
| @@ -1,48 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> telemanom detector.
pipeline = Pipeline()
pipeline.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
pipeline.add_step(step)

# Step 1: parse the column types.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
pipeline.add_step(step)

# Step 2: Telemanom anomaly detection on columns 2-6; results appended.
# (The original comment mislabelled this step "Fast Fourier Transform".)
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.telemanom'))
step.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6))
step.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
pipeline.add_step(step)

# Expose the detector's output as the pipeline result.
pipeline.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize to YAML (pipeline.to_json() would give JSON instead).
yaml_text = pipeline.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)
| @@ -1,86 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> time_interval_transform.
# (Earlier drafts chained extract_columns / imputer / random_forest steps and
# alternative SK transformers here; they existed only as commented-out code.)
pipeline = Pipeline()
pipeline.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
pipeline.add_step(step)

# Step 1: parse the column types.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
pipeline.add_step(step)

# Step 2: resample the series onto 5-minute intervals ('5T').
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.time_interval_transform'))
step.add_hyperparameter(name='time_interval', argument_type=ArgumentType.VALUE, data='5T')
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
pipeline.add_step(step)

# Final output.
# NOTE(review): this exposes steps.1.produce, so step 2's resampled output is
# discarded — confirm whether 'steps.2.produce' was intended.
pipeline.add_output(name='output predictions', data_reference='steps.1.produce')

# Serialize to YAML (pipeline.to_json() would give JSON instead).
yaml_text = pipeline.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)
| @@ -1,44 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> truncated SVD.
pipeline = Pipeline()
pipeline.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
pipeline.add_step(step)

# Step 1: parse the column types.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
pipeline.add_step(step)

# Step 2: 3-component truncated SVD over columns 2-6; components appended.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.truncated_svd'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
step.add_hyperparameter(name='n_components', argument_type=ArgumentType.VALUE, data=3)
step.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
pipeline.add_step(step)

# Expose step 2's produce output as the pipeline result.
pipeline.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize to YAML (pipeline.to_json() would give JSON instead).
yaml_text = pipeline.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)
| @@ -1,67 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser
#   -> extract attributes / extract targets -> imputer -> variational AE.
pipeline = Pipeline()
pipeline.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
pipeline.add_step(step)

# Step 1: parse the column types.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
pipeline.add_step(step)

# Step 2: attribute columns only.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
step.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                        data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline.add_step(step)

# Step 3: true-target columns, taken from the unparsed dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
step.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                        data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline.add_step(step)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'  # kept for symmetry with sibling scripts; unused here

# Step 4: imputer.
# NOTE(review): steps.4.produce is never consumed — step 5 reads the raw
# attributes instead. Confirm whether the VAE should see the imputed data.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step.add_output('produce')
pipeline.add_step(step)

# Step 5: variational auto-encoder anomaly detector (defaults).
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_vae'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step.add_output('produce')
pipeline.add_step(step)

# Expose the detector's output as the pipeline result.
pipeline.add_output(name='output predictions', data_reference='steps.5.produce')

# Serialize to YAML (pipeline.to_json() would give JSON instead).
yaml_text = pipeline.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)
| @@ -1,64 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import copy

# Pipeline: dataset_to_dataframe -> column_parser -> wavelet_transform
#   -> inverse wavelet_transform (round-trip check of the transform).
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: parse the column types.
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: forward wavelet transform (db8, 2 levels) of column 2;
# 'new' returns only the transformed columns.
primitive_2 = index.get_primitive('d3m.primitives.tods.feature_analysis.wavelet_transform')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='wavelet', argument_type=ArgumentType.VALUE, data='db8')
step_2.add_hyperparameter(name='level', argument_type=ArgumentType.VALUE, data=2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: inverse wavelet transform of step 2's coefficients (same wavelet
# and level, inverse=1), reconstructing the original signal.
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.wavelet_transform')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='wavelet', argument_type=ArgumentType.VALUE, data='db8')
step_3.add_hyperparameter(name='level', argument_type=ArgumentType.VALUE, data=2)
step_3.add_hyperparameter(name='inverse', argument_type=ArgumentType.VALUE, data=1)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=False)
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output.
# BUG FIX: the original referenced 'steps.2.produce', which left the inverse
# transform (step 3) dead code; expose step 3 so the round-trip result —
# the whole point of this script — is actually returned.
pipeline_description.add_output(name='output predictions', data_reference='steps.3.produce')

# Output to YAML (pipeline_description.to_json() would give JSON instead).
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)
| @@ -1,50 +0,0 @@ | |||
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import copy

# Pipeline: dataset_to_dataframe -> column_parser -> pyod_mogaal detector.
pipeline = Pipeline()
pipeline.add_input(name='inputs')

# Step 0: convert the raw dataset into a dataframe.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step.add_output('produce')
pipeline.add_step(step)

# Step 1: parse the column types.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step.add_output('produce')
pipeline.add_step(step)

# Step 2: MO-GAAL outlier detection on column 2; scores appended.
# NOTE(review): original author flagged a problem with multi-dimensional
# use_columns, hence the single column here.
step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_mogaal'))
step.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step.add_output('produce')
pipeline.add_step(step)

# Expose the detector's output as the pipeline result.
pipeline.add_output(name='output predictions', data_reference='steps.2.produce')

# Serialize to YAML (pipeline.to_json() would give JSON instead).
yaml_text = pipeline.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml_text)
print(yaml_text)
| @@ -1,50 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| import copy | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: test primitive | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_sogaal') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,)) # There is sth wrong with multi-dimensional | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| print(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,61 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.spectral_residual_transform') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='avg_filter_dimension', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(8,9,10,11,12)) # There is sth wrong with multi-dimensional | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_abs_energy') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(8,9,10,11,12)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_abs_sum') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(8,9,10,11,12)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_g_mean') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_h_mean') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_kurtosis') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_maximum') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_mean') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_mean_abs') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_mean_abs_temporal_derivative') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_mean_temporal_derivative') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_median') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,63 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_median_abs_deviation') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_minimum') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_skew') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_std') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_var') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_variation') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_vec_sum') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_willison_amplitude') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5,6)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,62 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_zero_crossing') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(9,10)) # There is sth wrong with multi-dimensional | |||
| step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -1,61 +0,0 @@ | |||
| from d3m import index | |||
| from d3m.metadata.base import ArgumentType | |||
| from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
| from d3m.metadata import hyperparams | |||
| import copy | |||
| # -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
| # extract_columns_by_semantic_types(targets) -> ^ | |||
| # Creating pipeline | |||
| pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # # Step 2: Standardization | |||
| primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
| step_2 = PrimitiveStep(primitive=primitive_2) | |||
| step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
| step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| pipeline_description.add_step(step_2) | |||
| # # Step 3: test primitive | |||
| # primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive') | |||
| primitive_3 = index.get_primitive('d3m.primitives.tods.timeseries_processing.decomposition.time_series_seasonality_trend_decomposition') | |||
| step_3 = PrimitiveStep(primitive=primitive_3) | |||
| step_3.add_hyperparameter(name='period', argument_type=ArgumentType.VALUE, data=5) | |||
| step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
| step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(8,9,10,11,12)) # There is sth wrong with multi-dimensional | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
| step_3.add_output('produce') | |||
| pipeline_description.add_step(step_3) | |||
| # Final Output | |||
| pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
| # Output to YAML | |||
| yaml = pipeline_description.to_yaml() | |||
| with open('pipeline.yml', 'w') as f: | |||
| f.write(yaml) | |||
| # Or you can output json | |||
| #data = pipline_description.to_json() | |||
| @@ -14,13 +14,13 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -28,7 +28,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -16,13 +16,13 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -30,7 +30,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -19,14 +19,14 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| @@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -15,7 +15,7 @@ pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| @@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: Column Parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -10,14 +10,14 @@ pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| #Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| @@ -13,7 +13,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| @@ -11,14 +11,14 @@ pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: Column Parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -15,7 +15,7 @@ pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| @@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: Column Parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -14,13 +14,13 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -28,7 +28,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -14,13 +14,13 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -28,7 +28,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -8,14 +8,14 @@ pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')) | |||
| step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| @@ -17,13 +17,13 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -31,7 +31,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -17,13 +17,13 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -31,7 +31,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -19,14 +19,14 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| @@ -12,21 +12,21 @@ pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| @@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -19,14 +19,14 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| @@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -17,14 +17,14 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -32,7 +32,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
| pipeline_description.add_step(step_2) | |||
| # Step 3: extract_columns_by_semantic_types(targets) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_3.add_output('produce') | |||
| step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
| @@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: Column Parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -12,21 +12,21 @@ pipeline_description = Pipeline() | |||
| pipeline_description.add_input(name='inputs') | |||
| # Step 0: dataset_to_dataframe | |||
| primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
| primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
| step_0 = PrimitiveStep(primitive=primitive_0) | |||
| step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
| step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| @@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| @@ -14,13 +14,13 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # Step 1: column_parser | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
| step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||
| pipeline_description.add_step(step_1) | |||
| # Step 2: extract_columns_by_semantic_types(attributes) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
| step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
| step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
| step_2.add_output('produce') | |||
| step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
| @@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
| pipeline_description.add_step(step_0) | |||
| # # Step 1: column_parser | |||
| primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
| primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
| step_1 = PrimitiveStep(primitive=primitive_1) | |||
| step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
| step_1.add_output('produce') | |||