sklearn.pipeline.Pipeline(Preprocessing=sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer)),Classifier=sklearn.ensemble._forest.RandomForestClassifier)(1) | Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement `fit` and `transform` methods.
The final estimator only needs to implement `fit`.
The transformers in the pipeline can be cached using the ``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters. For this, it
enables setting parameters of the various steps using their names and the
parameter name separated by a `'__'`, as in the example below. A step's
estimator may be replaced entirely by setting the parameter with its name
to another estimator, or a transformer removed by setting it to
`'passthrough'` or `None`. |
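As a concrete illustration of the `'__'` convention and step replacement described above, here is a minimal sketch using this flow's step names ("Preprocessing", "Classifier"); the StandardScaler is only a stand-in for the full ColumnTransformer listed below.

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Minimal pipeline with the same step names as this flow.
pipe = Pipeline(steps=[
    ("Preprocessing", StandardScaler()),
    ("Classifier", RandomForestClassifier()),
])

# Nested parameters are addressed as <step name>__<parameter name>.
pipe.set_params(Classifier__n_estimators=161)

# A step can be swapped for another estimator or disabled entirely.
pipe.set_params(Preprocessing="passthrough")
```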
sklearn.impute._base.SimpleImputer(43)_add_indicator | false |
sklearn.impute._base.SimpleImputer(43)_copy | true |
sklearn.impute._base.SimpleImputer(43)_fill_value | null |
sklearn.impute._base.SimpleImputer(43)_keep_empty_features | false |
sklearn.impute._base.SimpleImputer(43)_missing_values | NaN |
sklearn.impute._base.SimpleImputer(43)_strategy | "most_frequent" |
sklearn.impute._base.SimpleImputer(43)_verbose | "deprecated" |
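Taken together, the SimpleImputer rows above correspond roughly to the constructor call below (a sketch; the deprecated `verbose` parameter is omitted):

```python
import numpy as np
from sklearn.impute import SimpleImputer

# Categorical imputer as parameterised in this flow; only `strategy`
# deviates from the sklearn defaults.
simpleimputer = SimpleImputer(
    missing_values=np.nan,
    strategy="most_frequent",
    fill_value=None,
    copy=True,
    add_indicator=False,
    keep_empty_features=False,
)
```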
sklearn.ensemble._forest.RandomForestClassifier(28)_bootstrap | true |
sklearn.ensemble._forest.RandomForestClassifier(28)_ccp_alpha | 0.0 |
sklearn.ensemble._forest.RandomForestClassifier(28)_class_weight | null |
sklearn.ensemble._forest.RandomForestClassifier(28)_criterion | "entropy" |
sklearn.ensemble._forest.RandomForestClassifier(28)_max_depth | null |
sklearn.ensemble._forest.RandomForestClassifier(28)_max_features | 0.15870941193695215 |
sklearn.ensemble._forest.RandomForestClassifier(28)_max_leaf_nodes | null |
sklearn.ensemble._forest.RandomForestClassifier(28)_max_samples | null |
sklearn.ensemble._forest.RandomForestClassifier(28)_min_impurity_decrease | 0.003120332077977692 |
sklearn.ensemble._forest.RandomForestClassifier(28)_min_samples_leaf | 1 |
sklearn.ensemble._forest.RandomForestClassifier(28)_min_samples_split | 0.019767148752526248 |
sklearn.ensemble._forest.RandomForestClassifier(28)_min_weight_fraction_leaf | 0.008293701730325818 |
sklearn.ensemble._forest.RandomForestClassifier(28)_n_estimators | 161 |
sklearn.ensemble._forest.RandomForestClassifier(28)_n_jobs | -1 |
sklearn.ensemble._forest.RandomForestClassifier(28)_oob_score | true |
sklearn.ensemble._forest.RandomForestClassifier(28)_random_state | 50173 |
sklearn.ensemble._forest.RandomForestClassifier(28)_verbose | 0 |
sklearn.ensemble._forest.RandomForestClassifier(28)_warm_start | false |
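Read as a constructor call, the RandomForestClassifier block above amounts to the following sketch; values left at the sklearn defaults are included for completeness.

```python
from sklearn.ensemble import RandomForestClassifier

# Classifier as parameterised in this flow. Float values for
# min_samples_split and max_features are interpreted by sklearn as fractions
# of the samples and features, respectively.
classifier = RandomForestClassifier(
    n_estimators=161,
    criterion="entropy",
    max_depth=None,
    min_samples_split=0.019767148752526248,
    min_samples_leaf=1,
    min_weight_fraction_leaf=0.008293701730325818,
    max_features=0.15870941193695215,
    max_leaf_nodes=None,
    min_impurity_decrease=0.003120332077977692,
    bootstrap=True,
    oob_score=True,
    n_jobs=-1,
    random_state=50173,
    verbose=0,
    warm_start=False,
    class_weight=None,
    ccp_alpha=0.0,
    max_samples=None,
)
```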
sklearn.preprocessing._encoders.OneHotEncoder(43)_categories | "auto" |
sklearn.preprocessing._encoders.OneHotEncoder(43)_drop | null |
sklearn.preprocessing._encoders.OneHotEncoder(43)_dtype | {"oml-python:serialized_object": "type", "value": "np.float64"} |
sklearn.preprocessing._encoders.OneHotEncoder(43)_handle_unknown | "infrequent_if_exist" |
sklearn.preprocessing._encoders.OneHotEncoder(43)_max_categories | null |
sklearn.preprocessing._encoders.OneHotEncoder(43)_min_frequency | null |
sklearn.preprocessing._encoders.OneHotEncoder(43)_sparse | false |
sklearn.preprocessing._encoders.OneHotEncoder(43)_sparse_output | true |
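As a constructor call, the OneHotEncoder block reads roughly as below. Note that the listing records both the legacy `sparse` flag (false) and `sparse_output` (true); how that conflict resolves depends on the sklearn 1.2 deprecation handling, so the sketch passes only `sparse_output` as recorded.

```python
import numpy as np
from sklearn.preprocessing import OneHotEncoder

# Encoder as parameterised in this flow; the deprecated `sparse` flag from
# the listing is intentionally not repeated here.
onehotencoder = OneHotEncoder(
    categories="auto",
    drop=None,
    dtype=np.float64,
    handle_unknown="infrequent_if_exist",
    min_frequency=None,
    max_categories=None,
    sparse_output=True,
)
```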
sklearn.preprocessing._data.StandardScaler(15)_copy | true |
sklearn.preprocessing._data.StandardScaler(15)_with_mean | true |
sklearn.preprocessing._data.StandardScaler(15)_with_std | true |
sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer)(1)_memory | null |
sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer)(1)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "onehotencoder", "step_name": "onehotencoder"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "simpleimputer", "step_name": "simpleimputer"}}] |
sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer)(1)_verbose | false |
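The rows above describe the `categorical` sub-pipeline: one-hot encoding followed by most-frequent imputation, with `memory=None` and `verbose=False`. A minimal sketch, repeating only the non-default component settings:

```python
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

# Categorical branch, in the recorded step order: encode first, then impute.
categorical = Pipeline(steps=[
    ("onehotencoder", OneHotEncoder(handle_unknown="infrequent_if_exist")),
    ("simpleimputer", SimpleImputer(strategy="most_frequent")),
])
```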
sklearn.impute._knn.KNNImputer(2)_add_indicator | false |
sklearn.impute._knn.KNNImputer(2)_copy | true |
sklearn.impute._knn.KNNImputer(2)_keep_empty_features | false |
sklearn.impute._knn.KNNImputer(2)_metric | "nan_euclidean" |
sklearn.impute._knn.KNNImputer(2)_missing_values | NaN |
sklearn.impute._knn.KNNImputer(2)_n_neighbors | 5 |
sklearn.impute._knn.KNNImputer(2)_weights | "uniform" |
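The KNNImputer block above sits entirely at the sklearn defaults; spelled out as a constructor call:

```python
import numpy as np
from sklearn.impute import KNNImputer

# Numeric imputer; every recorded value matches the sklearn default.
knnimputer = KNNImputer(
    missing_values=np.nan,
    n_neighbors=5,
    weights="uniform",
    metric="nan_euclidean",
    copy=True,
    add_indicator=False,
    keep_empty_features=False,
)
```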
sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer))(1)_n_jobs | null |
sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer))(1)_remainder | "drop" |
sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer))(1)_sparse_threshold | 0.3 |
sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer))(1)_transformer_weights | null |
sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer))(1)_transformers | [{"oml-python:serialized_object": "component_reference", "value": {"key": "categorical", "step_name": "categorical", "argument_1": {"oml-python:serialized_object": "function", "value": "openml.extensions.sklearn.cat"}}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "numeric", "step_name": "numeric", "argument_1": {"oml-python:serialized_object": "function", "value": "openml.extensions.sklearn.cont"}}}] |
sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer))(1)_verbose | false |
sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer))(1)_verbose_feature_names_out | true |
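The ColumnTransformer rows above fix the transformer-level settings and route the `categorical` and `numeric` branches through the `cat` and `cont` column selectors named in the `transformers` field (assumed importable from `openml.extensions.sklearn`, as in the openml-python examples). A sketch with the two branch pipelines stubbed out as `'passthrough'`; their actual composition is given in the neighbouring blocks:

```python
from sklearn.compose import ColumnTransformer
from openml.extensions.sklearn import cat, cont  # selectors named in the transformers field

# ColumnTransformer-level settings as recorded; the branch pipelines are
# replaced by 'passthrough' placeholders here.
preprocessing = ColumnTransformer(
    transformers=[
        ("categorical", "passthrough", cat),
        ("numeric", "passthrough", cont),
    ],
    remainder="drop",
    sparse_threshold=0.3,
    n_jobs=None,
    transformer_weights=None,
    verbose=False,
    verbose_feature_names_out=True,
)
```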
sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer)(1)_memory | null |
sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer)(1)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "variancethreshold", "step_name": "variancethreshold"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "standardscaler", "step_name": "standardscaler"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "knnimputer", "step_name": "knnimputer"}}] |
sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer)(1)_verbose | false |
sklearn.feature_selection._variance_threshold.VarianceThreshold(9)_threshold | 0.2 |
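The `numeric` sub-pipeline rows and the VarianceThreshold row above combine into a three-step branch; only the variance threshold of 0.2 deviates from the component defaults. A minimal sketch:

```python
from sklearn.feature_selection import VarianceThreshold
from sklearn.impute import KNNImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Numeric branch: drop near-constant features, standardise, then impute.
numeric = Pipeline(steps=[
    ("variancethreshold", VarianceThreshold(threshold=0.2)),
    ("standardscaler", StandardScaler()),
    ("knnimputer", KNNImputer()),
])
```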
sklearn.pipeline.Pipeline(Preprocessing=sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer)),Classifier=sklearn.ensemble._forest.RandomForestClassifier)(1)_memory | null |
sklearn.pipeline.Pipeline(Preprocessing=sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer)),Classifier=sklearn.ensemble._forest.RandomForestClassifier)(1)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "Preprocessing", "step_name": "Preprocessing"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "Classifier", "step_name": "Classifier"}}] |
sklearn.pipeline.Pipeline(Preprocessing=sklearn.compose._column_transformer.ColumnTransformer(categorical=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder,simpleimputer=sklearn.impute._base.SimpleImputer),numeric=sklearn.pipeline.Pipeline(variancethreshold=sklearn.feature_selection._variance_threshold.VarianceThreshold,standardscaler=sklearn.preprocessing._data.StandardScaler,knnimputer=sklearn.impute._knn.KNNImputer)),Classifier=sklearn.ensemble._forest.RandomForestClassifier)(1)_verbose | false |
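Putting the pieces together, the whole flow can be reassembled roughly as below. This is a sketch: only the recorded non-default values are passed explicitly, everything else is left at the sklearn defaults listed above, and the `cat` / `cont` selectors are again assumed to come from `openml.extensions.sklearn`.

```python
from openml.extensions.sklearn import cat, cont
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import VarianceThreshold
from sklearn.impute import KNNImputer, SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler

# Categorical branch: one-hot encode, then impute with the most frequent value.
categorical = Pipeline(steps=[
    ("onehotencoder", OneHotEncoder(handle_unknown="infrequent_if_exist")),
    ("simpleimputer", SimpleImputer(strategy="most_frequent")),
])

# Numeric branch: drop low-variance features, standardise, then KNN-impute.
numeric = Pipeline(steps=[
    ("variancethreshold", VarianceThreshold(threshold=0.2)),
    ("standardscaler", StandardScaler()),
    ("knnimputer", KNNImputer()),
])

# Column-wise preprocessing followed by the tuned random forest.
pipe = Pipeline(steps=[
    ("Preprocessing", ColumnTransformer(transformers=[
        ("categorical", categorical, cat),
        ("numeric", numeric, cont),
    ])),
    ("Classifier", RandomForestClassifier(
        n_estimators=161,
        criterion="entropy",
        max_features=0.15870941193695215,
        min_samples_split=0.019767148752526248,
        min_weight_fraction_leaf=0.008293701730325818,
        min_impurity_decrease=0.003120332077977692,
        oob_score=True,
        n_jobs=-1,
        random_state=50173,
    )),
])
```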