sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(missingindicator=sklearn.impute.MissingIndicator,imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),gradientboostingclassifier=sklearn.ensemble.gradient_boosting.GradientBoostingClassifier)(2) | Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The transformers in the pipeline can be cached using ``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
A step's estimator may be replaced entirely by setting the parameter
with its name to another estimator, or a transformer removed by setting
to None. |
sklearn.preprocessing.imputation.Imputer(56)_axis | 0 |
sklearn.preprocessing.imputation.Imputer(56)_copy | true |
sklearn.preprocessing.imputation.Imputer(56)_missing_values | "NaN" |
sklearn.preprocessing.imputation.Imputer(56)_strategy | "mean" |
sklearn.preprocessing.imputation.Imputer(56)_verbose | 0 |
sklearn.preprocessing.data.StandardScaler(44)_copy | true |
sklearn.preprocessing.data.StandardScaler(44)_with_mean | true |
sklearn.preprocessing.data.StandardScaler(44)_with_std | true |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(7)_memory | null |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(7)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "simpleimputer", "step_name": "simpleimputer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "onehotencoder", "step_name": "onehotencoder"}}] |
sklearn.impute.SimpleImputer(19)_copy | true |
sklearn.impute.SimpleImputer(19)_fill_value | -1 |
sklearn.impute.SimpleImputer(19)_missing_values | NaN |
sklearn.impute.SimpleImputer(19)_strategy | "constant" |
sklearn.impute.SimpleImputer(19)_verbose | 0 |
sklearn.preprocessing._encoders.OneHotEncoder(28)_categorical_features | null |
sklearn.preprocessing._encoders.OneHotEncoder(28)_categories | null |
sklearn.preprocessing._encoders.OneHotEncoder(28)_dtype | {"oml-python:serialized_object": "type", "value": "np.float64"} |
sklearn.preprocessing._encoders.OneHotEncoder(28)_handle_unknown | "ignore" |
sklearn.preprocessing._encoders.OneHotEncoder(28)_n_values | null |
sklearn.preprocessing._encoders.OneHotEncoder(28)_sparse | true |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(missingindicator=sklearn.impute.MissingIndicator,imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_n_jobs | null |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(missingindicator=sklearn.impute.MissingIndicator,imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_remainder | "passthrough" |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(missingindicator=sklearn.impute.MissingIndicator,imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_sparse_threshold | 0.3 |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(missingindicator=sklearn.impute.MissingIndicator,imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_transformer_weights | null |
sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(missingindicator=sklearn.impute.MissingIndicator,imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(3)_transformers | [{"oml-python:serialized_object": "component_reference", "value": {"key": "numeric", "step_name": "numeric", "argument_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "nominal", "step_name": "nominal", "argument_1": []}}] |
sklearn.pipeline.Pipeline(missingindicator=sklearn.impute.MissingIndicator,imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler)(3)_memory | null |
sklearn.pipeline.Pipeline(missingindicator=sklearn.impute.MissingIndicator,imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler)(3)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "missingindicator", "step_name": "missingindicator"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "imputer", "step_name": "imputer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "standardscaler", "step_name": "standardscaler"}}] |
sklearn.impute.MissingIndicator(4)_error_on_new | false |
sklearn.impute.MissingIndicator(4)_features | "missing-only" |
sklearn.impute.MissingIndicator(4)_missing_values | NaN |
sklearn.impute.MissingIndicator(4)_sparse | "auto" |
sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(missingindicator=sklearn.impute.MissingIndicator,imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),gradientboostingclassifier=sklearn.ensemble.gradient_boosting.GradientBoostingClassifier)(2)_memory | null |
sklearn.pipeline.Pipeline(columntransformer=sklearn.compose._column_transformer.ColumnTransformer(numeric=sklearn.pipeline.Pipeline(missingindicator=sklearn.impute.MissingIndicator,imputer=sklearn.preprocessing.imputation.Imputer,standardscaler=sklearn.preprocessing.data.StandardScaler),nominal=sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute.SimpleImputer,onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),gradientboostingclassifier=sklearn.ensemble.gradient_boosting.GradientBoostingClassifier)(2)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "columntransformer", "step_name": "columntransformer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "gradientboostingclassifier", "step_name": "gradientboostingclassifier"}}] |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_criterion | "mae" |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_init | null |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_learning_rate | 0.020102565419330278 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_loss | "deviance" |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_max_depth | 2 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_max_features | 0.287001498644621 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_max_leaf_nodes | null |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_min_impurity_decrease | 0.034653748674688845 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_min_impurity_split | null |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_min_samples_leaf | 16 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_min_samples_split | 16 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_min_weight_fraction_leaf | 0.008077788835712196 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_n_estimators | 486 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_n_iter_no_change | 135 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_presort | "auto" |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_random_state | 21014 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_subsample | 0.2530604992109713 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_tol | 0.015236516124934948 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_validation_fraction | 0.7361888267104817 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_verbose | 0 |
sklearn.ensemble.gradient_boosting.GradientBoostingClassifier(28)_warm_start | false |