sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,columntransformer=sklearn.compose._column_transformer.ColumnTransformer(num=sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler),cat=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),logisticregression=sklearn.linear_model.logistic.LogisticRegression)(2) | Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The transformers in the pipeline can be cached using the ``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
A step's estimator may be replaced entirely by setting the parameter
with its name to another estimator, or a transformer removed by setting
it to 'passthrough' or ``None``. |
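The '__' parameter convention and the 'passthrough' replacement mentioned above can be illustrated with a minimal sketch; the step names and values below are illustrative only and are not taken from this flow:

    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    # A two-step pipeline with explicitly named steps.
    pipe = Pipeline(steps=[
        ("standardscaler", StandardScaler()),
        ("logisticregression", LogisticRegression()),
    ])

    # Set a nested parameter as '<step name>__<parameter name>'.
    pipe.set_params(logisticregression__C=25.0)

    # Disable a transformer by setting the step, by name, to 'passthrough'.
    pipe.set_params(standardscaler="passthrough")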
sklearn.preprocessing.data.StandardScaler(35)_copy | true |
sklearn.preprocessing.data.StandardScaler(35)_with_mean | true |
sklearn.preprocessing.data.StandardScaler(35)_with_std | true |
sklearn.impute._base.SimpleImputer(11)_add_indicator | false |
sklearn.impute._base.SimpleImputer(11)_copy | true |
sklearn.impute._base.SimpleImputer(11)_fill_value | null |
sklearn.impute._base.SimpleImputer(11)_missing_values | NaN |
sklearn.impute._base.SimpleImputer(11)_strategy | "most_frequent" |
sklearn.impute._base.SimpleImputer(11)_verbose | 0 |
sklearn.preprocessing._encoders.OneHotEncoder(16)_categorical_features | null |
sklearn.preprocessing._encoders.OneHotEncoder(16)_categories | null |
sklearn.preprocessing._encoders.OneHotEncoder(16)_drop | null |
sklearn.preprocessing._encoders.OneHotEncoder(16)_dtype | {"oml-python:serialized_object": "type", "value": "np.float64"} |
sklearn.preprocessing._encoders.OneHotEncoder(16)_handle_unknown | "ignore" |
sklearn.preprocessing._encoders.OneHotEncoder(16)_n_values | null |
sklearn.preprocessing._encoders.OneHotEncoder(16)_sparse | true |
sklearn.linear_model.logistic.LogisticRegression(33)_C | 25.0 |
sklearn.linear_model.logistic.LogisticRegression(33)_class_weight | null |
sklearn.linear_model.logistic.LogisticRegression(33)_dual | false |
sklearn.linear_model.logistic.LogisticRegression(33)_fit_intercept | true |
sklearn.linear_model.logistic.LogisticRegression(33)_intercept_scaling | 1 |
sklearn.linear_model.logistic.LogisticRegression(33)_l1_ratio | null |
sklearn.linear_model.logistic.LogisticRegression(33)_max_iter | 100 |
sklearn.linear_model.logistic.LogisticRegression(33)_multi_class | "warn" |
sklearn.linear_model.logistic.LogisticRegression(33)_n_jobs | null |
sklearn.linear_model.logistic.LogisticRegression(33)_penalty | "l2" |
sklearn.linear_model.logistic.LogisticRegression(33)_random_state | 1 |
sklearn.linear_model.logistic.LogisticRegression(33)_solver | "liblinear" |
sklearn.linear_model.logistic.LogisticRegression(33)_tol | 0.0001 |
sklearn.linear_model.logistic.LogisticRegression(33)_verbose | 0 |
sklearn.linear_model.logistic.LogisticRegression(33)_warm_start | false |
sklearn.compose._column_transformer.ColumnTransformer(num=sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler),cat=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(6)_n_jobs | null |
sklearn.compose._column_transformer.ColumnTransformer(num=sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler),cat=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(6)_remainder | "drop" |
sklearn.compose._column_transformer.ColumnTransformer(num=sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler),cat=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(6)_sparse_threshold | 0.3 |
sklearn.compose._column_transformer.ColumnTransformer(num=sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler),cat=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(6)_transformer_weights | null |
sklearn.compose._column_transformer.ColumnTransformer(num=sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler),cat=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(6)_transformers | [{"oml-python:serialized_object": "component_reference", "value": {"key": "num", "step_name": "num", "argument_1": [false, true, false, false, false, true, true, true, true, false, true, true, true, true, true, true, true, true, true]}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "cat", "step_name": "cat", "argument_1": [true, false, true, true, true, false, false, false, false, true, false, false, false, false, false, false, false, false, false]}}] |
sklearn.compose._column_transformer.ColumnTransformer(num=sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler),cat=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder))(6)_verbose | false |
sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler)(6)_memory | null |
sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler)(6)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "standardscaler", "step_name": "standardscaler"}}] |
sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler)(6)_verbose | false |
sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(7)_memory | null |
sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(7)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "onehotencoder", "step_name": "onehotencoder"}}] |
sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)(7)_verbose | false |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,columntransformer=sklearn.compose._column_transformer.ColumnTransformer(num=sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler),cat=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),logisticregression=sklearn.linear_model.logistic.LogisticRegression)(2)_memory | null |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,columntransformer=sklearn.compose._column_transformer.ColumnTransformer(num=sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler),cat=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),logisticregression=sklearn.linear_model.logistic.LogisticRegression)(2)_steps | [{"oml-python:serialized_object": "component_reference", "value": {"key": "simpleimputer", "step_name": "simpleimputer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "columntransformer", "step_name": "columntransformer"}}, {"oml-python:serialized_object": "component_reference", "value": {"key": "logisticregression", "step_name": "logisticregression"}}] |
sklearn.pipeline.Pipeline(simpleimputer=sklearn.impute._base.SimpleImputer,columntransformer=sklearn.compose._column_transformer.ColumnTransformer(num=sklearn.pipeline.Pipeline(standardscaler=sklearn.preprocessing.data.StandardScaler),cat=sklearn.pipeline.Pipeline(onehotencoder=sklearn.preprocessing._encoders.OneHotEncoder)),logisticregression=sklearn.linear_model.logistic.LogisticRegression)(2)_verbose | false |
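Taken together, the rows above describe a flow serialized from an older scikit-learn release (the sklearn.preprocessing.data and sklearn.linear_model.logistic module paths, the multi_class="warn" default, and the OneHotEncoder n_values/categorical_features parameters suggest the 0.21 era). The sketch below is a rough reconstruction in current scikit-learn, not the code that produced the original run: the boolean column masks are copied from the ColumnTransformer `transformers` entry, while deprecated settings (multi_class="warn", SimpleImputer verbose, OneHotEncoder n_values and categorical_features, the `sparse` flag) are left at their modern defaults.

    import numpy as np
    from sklearn.compose import ColumnTransformer
    from sklearn.impute import SimpleImputer
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import OneHotEncoder, StandardScaler

    # Boolean column masks from the ColumnTransformer `transformers` row above
    # (19 input features; the "cat" mask listed there is the exact complement).
    num_mask = [False, True, False, False, False, True, True, True, True, False,
                True, True, True, True, True, True, True, True, True]
    cat_mask = [not m for m in num_mask]

    preprocessor = ColumnTransformer(
        transformers=[
            ("num", Pipeline([("standardscaler", StandardScaler())]), num_mask),
            ("cat", Pipeline([("onehotencoder",
                               OneHotEncoder(handle_unknown="ignore",
                                             dtype=np.float64))]), cat_mask),
        ],
        remainder="drop",
        sparse_threshold=0.3,
    )

    flow = Pipeline([
        ("simpleimputer", SimpleImputer(strategy="most_frequent")),
        ("columntransformer", preprocessor),
        ("logisticregression", LogisticRegression(C=25.0, penalty="l2",
                                                  solver="liblinear", tol=1e-4,
                                                  max_iter=100, random_state=1)),
    ])

Fitting `flow` on a 19-column dataset imputes missing values with the most frequent value per column, scales the numeric columns, one-hot encodes the categorical ones (ignoring unseen categories at predict time), and trains an L2-regularized liblinear logistic regression with C=25.0.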