sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),variancethreshold=sklearn.feature_selection.variance_threshold.VarianceThreshold,extratreesclassifier=sklearn.ensemble.forest.ExtraTreesClassifier)(1) | Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The transformers in the pipeline can be cached using the ``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
A step's estimator may be replaced entirely by setting the parameter
with its name to another estimator, or a transformer removed by setting
it to 'passthrough' or ``None``. |
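
The '__' parameter convention and the 'passthrough' replacement mentioned above can be illustrated with a minimal sketch. It reuses estimator types that appear in this flow, but the step names and values here are illustrative, not taken from the flow's stored parameters:

```python
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import VarianceThreshold
from sklearn.ensemble import ExtraTreesClassifier

pipe = Pipeline(steps=[
    ("variancethreshold", VarianceThreshold()),
    ("extratreesclassifier", ExtraTreesClassifier(random_state=0)),
])

# Set a nested parameter via "<step name>__<parameter name>".
pipe.set_params(extratreesclassifier__n_estimators=100)

# Remove a transformer step entirely by replacing it with "passthrough".
pipe.set_params(variancethreshold="passthrough")
```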
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(2)_n_jobs | null |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(2)_remainder | "passthrough" |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(2)_sparse_threshold | 0.3 |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(2)_transformer_weights | null |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(2)_transformers | [{"oml-python:serialized_object": "component_reference", "value": {"key": "numeric", "step_name": "numeric", "argument_1": []}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "nominal", "step_name": "nominal", "argument_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]}}] |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(2)_verbose | false |
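
Read together, the ColumnTransformer rows say that the "nominal" sub-pipeline is applied to columns 0-35, the "numeric" sub-pipeline to an empty column list (the task behind this flow apparently exposes no numeric features), and any remaining columns are passed through unchanged. A minimal sketch of the equivalent constructor call, with simple stand-in transformers in place of the full sub-pipelines listed below:

```python
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder

# Stand-ins for the "numeric" and "nominal" sub-pipelines described below.
numeric = StandardScaler()
nominal = OneHotEncoder(handle_unknown="ignore")

preprocessor = ColumnTransformer(
    transformers=[
        ("numeric", numeric, []),               # no numeric columns in this task
        ("nominal", nominal, list(range(36))),  # categorical columns 0-35
    ],
    remainder="passthrough",   # leave any other columns untouched
    sparse_threshold=0.3,      # as listed above
    n_jobs=None,
    transformer_weights=None,
    verbose=False,
)
```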
sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler)(7)_memory | null |
sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler)(7)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "imputer", "step_name": "imputer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "standardscaler", "step_name": "standardscaler"}}] |
sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler)(7)_verbose | false |
sklearn.preprocessing.imputation.Imputer(49)_axis | 0 |
sklearn.preprocessing.imputation.Imputer(49)_copy | true |
sklearn.preprocessing.imputation.Imputer(49)_missing_values | "NaN" |
sklearn.preprocessing.imputation.Imputer(49)_strategy | "mean" |
sklearn.preprocessing.imputation.Imputer(49)_verbose | 0 |
sklearn.preprocessing.data.StandardScaler(35)_copy | true |
sklearn.preprocessing.data.StandardScaler(35)_with_mean | true |
sklearn.preprocessing.data.StandardScaler(35)_with_std | true |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(3)_memory | null |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(3)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "simpleimputer", "step_name": "simpleimputer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "onehotencoder", "step_name": "onehotencoder"}}] |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(3)_verbose | false |
sklearn.impute._base.SimpleImputer(11)_add_indicator | false |
sklearn.impute._base.SimpleImputer(11)_copy | true |
sklearn.impute._base.SimpleImputer(11)_fill_value | -1 |
sklearn.impute._base.SimpleImputer(11)_missing_values | NaN |
sklearn.impute._base.SimpleImputer(11)_strategy | "constant" |
sklearn.impute._base.SimpleImputer(11)_verbose | 0 |
sklearn.preprocessing._encoders.OneHotEncoder(16)_categorical_features | null |
sklearn.preprocessing._encoders.OneHotEncoder(16)_categories | null |
sklearn.preprocessing._encoders.OneHotEncoder(16)_drop | null |
sklearn.preprocessing._encoders.OneHotEncoder(16)_dtype | {"oml-python:serialized_object": "type", "value": "np.float64"} |
sklearn.preprocessing._encoders.OneHotEncoder(16)_handle_unknown | "ignore" |
sklearn.preprocessing._encoders.OneHotEncoder(16)_n_values | null |
sklearn.preprocessing._encoders.OneHotEncoder(16)_sparse | true |
sklearn.feature_selection.variance_threshold.VarianceThreshold(27)_threshold | 0.0 |
sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),variancethreshold=sklearn.feature_selection.variance_threshold.VarianceThreshold,extratreesclassifier=sklearn.ensemble.forest.ExtraTreesClassifier)(1)_memory | null |
sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),variancethreshold=sklearn.feature_selection.variance_threshold.VarianceThreshold,extratreesclassifier=sklearn.ensemble.forest.ExtraTreesClassifier)(1)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "columntransformer", "step_name": "columntransformer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "variancethreshold", "step_name": "variancethreshold"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "extratreesclassifier", "step_name": "extratreesclassifier"}}] |
sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),variancethreshold=sklearn.feature_selection.variance_threshold.VarianceThreshold,extratreesclassifier=sklearn.ensemble.forest.ExtraTreesClassifier)(1)_verbose | false |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_bootstrap | false |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_class_weight | null |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_criterion | "gini" |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_max_depth | null |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_max_features | "auto" |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_max_leaf_nodes | null |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_min_impurity_decrease | 0.0 |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_min_impurity_split | null |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_min_samples_leaf | 1 |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_min_samples_split | 2 |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_min_weight_fraction_leaf | 0.0 |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_n_estimators | "warn" |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_n_jobs | null |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_oob_score | false |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_random_state | 0 |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_verbose | 0 |
sklearn.ensemble.forest.ExtraTreesClassifier(13)_warm_start | false |
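
Taken together, the rows above describe a preprocessing-plus-classifier flow built against an older scikit-learn release: the module paths sklearn.preprocessing.imputation.Imputer and sklearn.ensemble.forest, and the n_estimators default of "warn", point to roughly the 0.20/0.21 line. A hedged reconstruction with current module paths is sketched below; it substitutes SimpleImputer(strategy="mean") for the removed Imputer and an explicit n_estimators=100 for the "warn" sentinel, while the column indices and remaining hyperparameters follow the table:

```python
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
from sklearn.ensemble import ExtraTreesClassifier

numeric = Pipeline(steps=[
    ("imputer", SimpleImputer(strategy="mean")),  # stands in for the removed Imputer
    ("standardscaler", StandardScaler()),
])

nominal = Pipeline(steps=[
    ("simpleimputer", SimpleImputer(strategy="constant", fill_value=-1)),
    ("onehotencoder", OneHotEncoder(handle_unknown="ignore", dtype=np.float64)),
])

clf = Pipeline(steps=[
    ("columntransformer", ColumnTransformer(
        transformers=[
            ("numeric", numeric, []),
            ("nominal", nominal, list(range(36))),
        ],
        remainder="passthrough",
        sparse_threshold=0.3,
    )),
    ("variancethreshold", VarianceThreshold(threshold=0.0)),
    ("extratreesclassifier", ExtraTreesClassifier(
        n_estimators=100,    # the table lists the old "warn" sentinel default
        criterion="gini",
        bootstrap=False,
        random_state=0,
    )),
])
```

Note that the table's max_features value of "auto" is left at its default here, since that option was deprecated in later scikit-learn releases ("sqrt" is the equivalent for classifiers).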