sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transfo
rmer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.pr
eprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.St
andardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.imput
e._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotE
ncoder)),variancethreshold=sklearn.feature_selection.variance_threshold.Var
ianceThreshold,extratreesclassifier=sklearn.ensemble.forest.ExtraTreesClass
ifier)(2) | Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The transformers in the pipeline can be cached using the ``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
A step's estimator may be replaced entirely by setting the parameter
with its name to another estimator, or a transformer removed by setting
it to 'passthrough' or ``None``. |
sklearn.impute._base.SimpleImputer(10)_add_indicator | false |
sklearn.impute._base.SimpleImputer(10)_copy | true |
sklearn.impute._base.SimpleImputer(10)_fill_value | -1 |
sklearn.impute._base.SimpleImputer(10)_missing_values | NaN |
sklearn.impute._base.SimpleImputer(10)_strategy | "constant" |
sklearn.impute._base.SimpleImputer(10)_verbose | 0 |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_n_jobs | null |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_remainder | "passthrough" |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_sparse_threshold | 0.3 |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_transformer_weights | null |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_transformers | [{"oml-python:serialized_object": "component_reference", "value": {"key": "numeric", "step_name": "numeric", "argument_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "nominal", "step_name": "nominal", "argument_1": []}}] |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_verbose | false |
sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler)(8)_memory | null |
sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler)(8)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "imputer", "step_name": "imputer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "standardscaler", "step_name": "standardscaler"}}] |
sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler)(8)_verbose | false |
sklearn.preprocessing.imputation.Imputer(50)_axis | 0 |
sklearn.preprocessing.imputation.Imputer(50)_copy | true |
sklearn.preprocessing.imputation.Imputer(50)_missing_values | "NaN" |
sklearn.preprocessing.imputation.Imputer(50)_strategy | "mean" |
sklearn.preprocessing.imputation.Imputer(50)_verbose | 0 |
sklearn.preprocessing.data.StandardScaler(36)_copy | true |
sklearn.preprocessing.data.StandardScaler(36)_with_mean | true |
sklearn.preprocessing.data.StandardScaler(36)_with_std | true |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(4)_memory | null |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(4)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "simpleimputer", "step_name": "simpleimputer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "onehotencoder", "step_name": "onehotencoder"}}] |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(4)_verbose | false |
sklearn.preprocessing._encoders.OneHotEncoder(17)_categorical_features | null |
sklearn.preprocessing._encoders.OneHotEncoder(17)_categories | null |
sklearn.preprocessing._encoders.OneHotEncoder(17)_drop | null |
sklearn.preprocessing._encoders.OneHotEncoder(17)_dtype | {"oml-python:serialized_object": "type", "value": "np.float64"} |
sklearn.preprocessing._encoders.OneHotEncoder(17)_handle_unknown | "ignore" |
sklearn.preprocessing._encoders.OneHotEncoder(17)_n_values | null |
sklearn.preprocessing._encoders.OneHotEncoder(17)_sparse | true |
sklearn.feature_selection.variance_threshold.VarianceThreshold(28)_threshold | 0.0 |
sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),variancethreshold=sklearn.feature_selection.variance_threshold.VarianceThreshold,extratreesclassifier=sklearn.ensemble.forest.ExtraTreesClassifier)(2)_memory | null |
sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),variancethreshold=sklearn.feature_selection.variance_threshold.VarianceThreshold,extratreesclassifier=sklearn.ensemble.forest.ExtraTreesClassifier)(2)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "columntransformer", "step_name": "columntransformer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "variancethreshold", "step_name": "variancethreshold"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "extratreesclassifier", "step_name": "extratreesclassifier"}}] |
sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),variancethreshold=sklearn.feature_selection.variance_threshold.VarianceThreshold,extratreesclassifier=sklearn.ensemble.forest.ExtraTreesClassifier)(2)_verbose | false |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_bootstrap | false |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_class_weight | null |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_criterion | "gini" |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_max_depth | null |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_max_features | "auto" |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_max_leaf_nodes | null |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_min_impurity_decrease | 0.0 |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_min_impurity_split | null |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_min_samples_leaf | 1 |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_min_samples_split | 2 |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_min_weight_fraction_leaf | 0.0 |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_n_estimators | "warn" |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_n_jobs | null |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_oob_score | false |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_random_state | 0 |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_verbose | 0 |
sklearn.ensemble.forest.ExtraTreesClassifier(14)_warm_start | false |