13 HPT: sklearn RandomForestClassifier VBDP Data

spotPython can be installed via pip. Alternatively, the source code can be downloaded from GitHub: https://github.com/sequential-parameter-optimization/spotPython.

!pip install spotPython

- Uncomment the following lines if you want to (re-)install the latest version of spotPython:

# import sys
# !{sys.executable} -m pip install --upgrade build
# !{sys.executable} -m pip install --upgrade --force-reinstall spotPython
13.1 Step 1: Setup
Before we consider the detailed experimental setup, we select the parameters that affect run time and the initial design size.
MAX_TIME = 1      # maximum run time of the tuner in minutes
INIT_SIZE = 5     # size of the initial design
ORIGINAL = False  # use the original (unmodified) data files
PREFIX = "16"     # prefix for the experiment name

import warnings
warnings.filterwarnings("ignore")
13.2 Step 2: Initialization of the Empty fun_control Dictionary
from spotPython.utils.init import fun_control_init
from spotPython.utils.file import get_experiment_name, get_spot_tensorboard_path
from spotPython.utils.device import getDevice
experiment_name = get_experiment_name(prefix=PREFIX)
fun_control = fun_control_init(
    task="classification",
    spot_tensorboard_path=get_spot_tensorboard_path(experiment_name))
13.3 Step 3: Data Loading
13.3.1 Load Data: Classification VBDP
import pandas as pd
if ORIGINAL == True:
    train_df = pd.read_csv('./data/VBDP/train.csv')
    test_df = pd.read_csv('./data/VBDP/test.csv')
else:
    train_df = pd.read_csv('./data/VBDP/train.csv')
    # remove the id column
    train_df = train_df.drop(columns=['id'])
from sklearn.preprocessing import OrdinalEncoder
n_samples = train_df.shape[0]
n_features = train_df.shape[1] - 1
target_column = "prognosis"
# Encode our prognosis labels as integers for easier decoding later
enc = OrdinalEncoder()
train_df[target_column] = enc.fit_transform(train_df[[target_column]])
train_df.columns = [f"x{i}" for i in range(1, n_features+1)] + [target_column]
print(train_df.shape)
train_df.head()
(707, 65)
x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | ... | x56 | x57 | x58 | x59 | x60 | x61 | x62 | x63 | x64 | prognosis | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 3.0 |
1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 1.0 | 0.0 | 1.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 7.0 |
2 | 0.0 | 1.0 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | ... | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 | 1.0 | 3.0 |
3 | 0.0 | 0.0 | 1.0 | 1.0 | 1.0 | 1.0 | 0.0 | 1.0 | 0.0 | 1.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 10.0 |
4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 1.0 | 0.0 | ... | 0.0 | 1.0 | 0.0 | 0.0 | 1.0 | 1.0 | 1.0 | 0.0 | 0.0 | 6.0 |
5 rows × 65 columns
The full data set train_df has 64 features. The target column is labeled as prognosis.
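Before splitting, it can be helpful to look at the class distribution of the encoded target. This check is illustrative and not part of the original workflow:

```python
# Count how many samples fall into each encoded prognosis class;
# relevant for the stratified split in the next section.
print(train_df[target_column].value_counts())
```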
13.3.2 Holdout Train and Test Data
We split out a hold-out test set (25% of the data) so that we can calculate an example MAP@K score on unseen data later.
import numpy as np
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train_df.drop(target_column, axis=1),
                                                    train_df[target_column],
                                                    random_state=42,
                                                    test_size=0.25,
                                                    stratify=train_df[target_column])
train = pd.DataFrame(np.hstack((X_train, np.array(y_train).reshape(-1, 1))))
test = pd.DataFrame(np.hstack((X_test, np.array(y_test).reshape(-1, 1))))
train.columns = [f"x{i}" for i in range(1, n_features+1)] + [target_column]
test.columns = [f"x{i}" for i in range(1, n_features+1)] + [target_column]
print(train.shape)
print(test.shape)
train.head()
(530, 65)
(177, 65)
x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | ... | x56 | x57 | x58 | x59 | x60 | x61 | x62 | x63 | x64 | prognosis | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 1.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 |
1 | 0.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 4.0 |
2 | 0.0 | 0.0 | 0.0 | 1.0 | 1.0 | 1.0 | 0.0 | 0.0 | 0.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 1.0 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 |
3 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 | 1.0 | 0.0 | 0.0 | 0.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 6.0 |
4 | 0.0 | 0.0 | 0.0 | 1.0 | 0.0 | 0.0 | 1.0 | 1.0 | 0.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 5.0 |
5 rows × 65 columns
# add the dataset to the fun_control
fun_control.update({"data": train_df,  # full dataset
                    "train": train,
                    "test": test,
                    "n_samples": n_samples,
                    "target_column": target_column})
13.4 Step 4: Specification of the Preprocessing Model
Data preprocessing can be very simple; e.g., you can ignore it. Then you would set the prep_model to None:

prep_model = None
fun_control.update({"prep_model": prep_model})
A default approach for numerical data is the StandardScaler
(mean 0, variance 1). This can be selected as follows:
# from sklearn.preprocessing import StandardScaler
# prep_model = StandardScaler()
# fun_control.update({"prep_model": prep_model})
Even more complicated preprocessing steps are possible, e.g., the following pipeline:
# from sklearn.compose import ColumnTransformer
# from sklearn.preprocessing import OneHotEncoder, StandardScaler
# categorical_columns = []
# one_hot_encoder = OneHotEncoder(handle_unknown="ignore", sparse_output=False)
# prep_model = ColumnTransformer(
#     transformers=[
#         ("categorical", one_hot_encoder, categorical_columns),
#     ],
#     remainder=StandardScaler(),
# )
13.5 Step 5: Select Model (algorithm) and core_model_hyper_dict
The selection of the algorithm (ML model) that should be tuned is done by specifying its name from the sklearn implementation. For example, the SVC support vector machine classifier is selected as follows:

add_core_model_to_fun_control(SVC, fun_control, SklearnHyperDict)
Other core_models are, e.g.:
- RidgeCV
- GradientBoostingRegressor
- ElasticNet
- RandomForestClassifier
- LogisticRegression
- KNeighborsClassifier
- GradientBoostingClassifier
- HistGradientBoostingClassifier

We will use the RandomForestClassifier in this example.
from sklearn.linear_model import RidgeCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import ElasticNet
from spotPython.hyperparameters.values import add_core_model_to_fun_control
from spotPython.data.sklearn_hyper_dict import SklearnHyperDict
from spotPython.fun.hypersklearn import HyperSklearn
# core_model = RidgeCV
# core_model = GradientBoostingRegressor
# core_model = ElasticNet
core_model = RandomForestClassifier
# core_model = SVC
# core_model = LogisticRegression
# core_model = KNeighborsClassifier
# core_model = GradientBoostingClassifier
add_core_model_to_fun_control(core_model=core_model,
                              fun_control=fun_control,
                              hyper_dict=SklearnHyperDict,
                              filename=None)
Now fun_control has the information from the JSON file. The available hyperparameters are:
print(*fun_control["core_model_hyper_dict"].keys(), sep="\n")
n_estimators
criterion
max_depth
min_samples_split
min_samples_leaf
min_weight_fraction_leaf
max_features
max_leaf_nodes
min_impurity_decrease
bootstrap
oob_score
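Each key maps to the full specification of one hyperparameter (type, default, bounds, transform; compare the design table in Section 13.8.1). A single entry can be inspected directly, e.g.:

```python
# Show the complete hyper_dict entry for a single hyperparameter
fun_control["core_model_hyper_dict"]["n_estimators"]
```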
13.6 Step 6: Modify hyper_dict Hyperparameters for the Selected Algorithm aka core_model
13.6.1 Modify hyperparameter of type numeric and integer (boolean)
Numeric and boolean values can be modified using the modify_hyper_parameter_bounds method. For example, to change the tol hyperparameter of the SVC model to the interval [1e-3, 1e-2], the following code can be used:

modify_hyper_parameter_bounds(fun_control, "tol", bounds=[1e-3, 1e-2])
from spotPython.hyperparameters.values import modify_hyper_parameter_bounds
# modify_hyper_parameter_bounds(fun_control, "tol", bounds=[1e-3, 1e-2])
13.6.2 Modify hyperparameter of type factor
spotPython provides functions for modifying the hyperparameters, their bounds and factors as well as for activating and de-activating hyperparameters without re-compilation of the Python source code. These functions were described in Section 12.6.

Factors can be modified with the modify_hyper_parameter_levels function. For example, to exclude the sigmoid kernel from the tuning, the kernel hyperparameter of the SVC model can be modified as follows:

modify_hyper_parameter_levels(fun_control, "kernel", ["linear", "rbf"])

The new setting can be controlled via:

fun_control["core_model_hyper_dict"]["kernel"]
from spotPython.hyperparameters.values import modify_hyper_parameter_levels
# XGBoost:
# modify_hyper_parameter_levels(fun_control, "loss", ["log_loss"])
Since computing the oob_score requires the bootstrap hyperparameter to be set to True, we set the oob_score parameter to False for now. The oob_score is discussed later in Section 13.7.3.

modify_hyper_parameter_bounds(fun_control, "bootstrap", bounds=[0, 1])
modify_hyper_parameter_bounds(fun_control, "oob_score", bounds=[0, 0])
13.6.3 Optimizers
Optimizers are described in Section 12.6.1.
13.6.4 Selection of the Objective: Metric and Loss Functions
- Machine learning models are optimized with respect to a metric, for example, the accuracy function.
- Deep learning models, e.g., neural networks, are optimized with respect to a loss function, for example, the cross_entropy function, and evaluated with respect to a metric, for example, the accuracy function (see the toy example after this list).
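A small illustration of this distinction (toy data, not the VBDP task): a metric like accuracy is computed from hard class predictions, while a loss like cross entropy (log loss) is computed from predicted probabilities.

```python
from sklearn.metrics import accuracy_score, log_loss

y_true = [0, 1, 1]
y_pred = [0, 1, 0]                               # hard predictions -> metric
y_proba = [[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]]   # probabilities -> loss
print(accuracy_score(y_true, y_pred))            # 0.666...
print(log_loss(y_true, y_proba))
```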
13.7 Step 7: Selection of the Objective (Loss) Function
The loss function, which is usually used in deep learning for optimizing the weights of the net, is stored in the fun_control dictionary as "loss_function".
13.7.1 Metric Function
There are two different types of metrics in spotPython:

- "metric_river" is used for the river-based evaluation via eval_oml_iter_progressive.
- "metric_sklearn" is used for the sklearn-based evaluation.
We will consider multi-class classification metrics, e.g., mapk_score and top_k_accuracy_score.

In this multi-class classification example the machine learning algorithm should return the probabilities of the specific classes ("predict_proba") instead of the predicted values. We therefore set "predict_proba" to True in the fun_control dictionary.
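The difference matters because ranking metrics such as MAP@K need the full probability matrix. A minimal sketch on synthetic data (not the VBDP data):

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X_demo, y_demo = make_classification(n_samples=60, n_classes=3, n_informative=5,
                                     random_state=0)
clf = RandomForestClassifier(random_state=0).fit(X_demo, y_demo)
print(clf.predict(X_demo[:2]))        # one class label per sample
print(clf.predict_proba(X_demo[:2]))  # (n_samples, n_classes) probabilities
```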
13.7.1.1 The MAPK Metric
To select the MAPK metric, the following two entries can be added to the fun_control dictionary:

"metric_sklearn": mapk_score
"metric_params": {"k": 3}
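The following self-contained sketch illustrates the MAP@K idea for single-label targets (it is not the spotPython implementation of mapk_score): each sample scores 1/rank if the true class appears among the top-k predicted classes, and 0 otherwise.

```python
import numpy as np

def map_at_k(y_true, y_proba, k=3):
    # score 1/rank if the true class is among the top-k classes, else 0
    scores = []
    for yt, row in zip(y_true, y_proba):
        top_k = np.argsort(row)[::-1][:k]
        hits = np.where(top_k == yt)[0]
        scores.append(1.0 / (hits[0] + 1) if hits.size else 0.0)
    return float(np.mean(scores))

y_true = np.array([0, 1, 2])
y_proba = np.array([[0.7, 0.1, 0.1, 0.1],   # true class ranked 1st -> 1
                    [0.5, 0.3, 0.1, 0.1],   # true class ranked 2nd -> 1/2
                    [0.4, 0.3, 0.2, 0.1]])  # true class ranked 3rd -> 1/3
print(map_at_k(y_true, y_proba, k=3))       # (1 + 1/2 + 1/3) / 3 ~ 0.611
```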
13.7.1.2 Other Metrics
Alternatively, other metrics for multi-class classification can be used, e.g., top_k_accuracy_score or roc_auc_score. The metric roc_auc_score requires the parameter "multi_class", e.g., "multi_class": "ovr". This is set in the fun_control dictionary.
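Both alternatives accept the probability matrix returned by predict_proba. A hedged toy example (values chosen by hand, not from the VBDP data):

```python
import numpy as np
from sklearn.metrics import roc_auc_score, top_k_accuracy_score

y_true = np.array([0, 1, 2, 2])
y_proba = np.array([[0.6, 0.3, 0.1],
                    [0.2, 0.5, 0.3],
                    [0.1, 0.3, 0.6],
                    [0.3, 0.45, 0.25]])
print(top_k_accuracy_score(y_true, y_proba, k=2))           # 0.75
print(roc_auc_score(y_true, y_proba, multi_class="ovr"))
```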
spotPython performs a minimization. Therefore, metrics that should be maximized have to be multiplied by -1. This is done by setting "weights" to -1.

The complete setup for the metric in our example is:
from spotPython.utils.metrics import mapk_score
fun_control.update({"weights": -1,
"metric_sklearn": mapk_score,
"predict_proba": True,
"metric_params": {"k": 3},
})
13.7.2 Evaluation on Hold-out Data
- The default method for computing the performance is "eval_holdout".
- Alternatively, cross-validation can be used for every machine learning model.
- Specifically for random forests, the OOB score can be used.

fun_control.update({"eval": "train_hold_out",
                    })
13.7.3 OOB Score
Using the OOB score is a very efficient way to estimate the performance of a random forest classifier. The OOB score is calculated on the training data and does not require a hold-out test set. If the OOB score is used, the key "eval" in the fun_control dictionary should be set to "eval_oob_score" as shown below. In addition, the keys "oob_score" and "bootstrap" have to be fixed to True (bounds [1, 1]), because the OOB score requires the bootstrap method.

The following lines activate the OOB score:

fun_control.update({"eval": "eval_oob_score",
                    })
modify_hyper_parameter_bounds(fun_control, "bootstrap", bounds=[1, 1])
modify_hyper_parameter_bounds(fun_control, "oob_score", bounds=[1, 1])
13.7.3.1 Cross Validation
Instead of using the OOB score, classical cross-validation can be used. The number of folds is set by the key "k_folds". For example, to use 5-fold cross-validation, the key "k_folds" is set to 5. Uncomment the following lines to use cross-validation:

# fun_control.update({
#     "eval": "train_cv",
#     "k_folds": 10,
# })
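For orientation, the following hedged sketch shows what k-fold cross-validation looks like in plain scikit-learn on the training split from Section 13.3.2; it is not the spotPython implementation, and cross_val_score uses accuracy by default rather than MAP@K:

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

# 5-fold CV of an untuned random forest on the training split
scores = cross_val_score(RandomForestClassifier(random_state=0),
                         X_train, y_train, cv=5)
print(scores.mean(), scores.std())
```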
13.8 Step 8: Calling the SPOT Function
13.8.1 Preparing the SPOT Call
- Get types and variable names as well as lower and upper bounds for the hyperparameters.
# extract the variable types, names, and bounds
from spotPython.hyperparameters.values import (get_bound_values,
                                               get_var_name,
                                               get_var_type,)
var_type = get_var_type(fun_control)
var_name = get_var_name(fun_control)
lower = get_bound_values(fun_control, "lower")
upper = get_bound_values(fun_control, "upper")
from spotPython.utils.eda import gen_design_table
print(gen_design_table(fun_control))
| name | type | default | lower | upper | transform |
|--------------------------|--------|-----------|---------|---------|------------------------|
| n_estimators | int | 7 | 5 | 10 | transform_power_2_int |
| criterion | factor | gini | 0 | 2 | None |
| max_depth | int | 10 | 1 | 20 | transform_power_2_int |
| min_samples_split | int | 2 | 2 | 100 | None |
| min_samples_leaf | int | 1 | 1 | 25 | None |
| min_weight_fraction_leaf | float | 0.0 | 0 | 0.01 | None |
| max_features | factor | sqrt | 0 | 1 | transform_none_to_None |
| max_leaf_nodes | int | 10 | 7 | 12 | transform_power_2_int |
| min_impurity_decrease | float | 0.0 | 0 | 0.01 | None |
| bootstrap | factor | 1 | 1 | 1 | None |
| oob_score | factor | 0 | 1 | 1 | None |
13.8.2 The Objective Function
The objective function is selected next. It implements an interface from sklearn's training, validation, and testing methods to spotPython.

from spotPython.fun.hypersklearn import HyperSklearn
fun = HyperSklearn().fun_sklearn
13.8.3 Run the Spot Optimizer

- Run SPOT for approximately MAX_TIME minutes (here: 1 minute).
- Note: the run takes longer than MAX_TIME, because the evaluation time of the initial design (here: INIT_SIZE = 5 points) is not included in the time budget.
The default hyperparameter values are passed to the tuner as an additional starting point X_start:

from spotPython.hyperparameters.values import get_default_hyperparameters_as_array
X_start = get_default_hyperparameters_as_array(fun_control)
X_start
array([[ 7., 0., 10., 2., 1., 0., 0., 10., 0., 1., 0.]])
import numpy as np
from spotPython.spot import spot
from math import inf
spot_tuner = spot.Spot(fun=fun,
                       lower=lower,
                       upper=upper,
                       fun_evals=inf,
                       fun_repeats=1,
                       max_time=MAX_TIME,
                       noise=False,
                       tolerance_x=np.sqrt(np.spacing(1)),
                       var_type=var_type,
                       var_name=var_name,
                       infill_criterion="y",
                       n_points=1,
                       seed=123,
                       log_level=50,
                       show_models=False,
                       show_progress=True,
                       fun_control=fun_control,
                       design_control={"init_size": INIT_SIZE,
                                       "repeats": 1},
                       surrogate_control={"noise": True,
                                          "cod_type": "norm",
                                          "min_theta": -4,
                                          "max_theta": 3,
                                          "n_theta": len(var_name),
                                          "model_fun_evals": 10_000,
                                          "log_level": 50
                                          })
spot_tuner.run(X_start=X_start)
spotPython tuning: -0.34276729559748426 [----------] 1.45%
spotPython tuning: -0.34685534591194966 [----------] 2.78%
spotPython tuning: -0.34685534591194966 [----------] 3.70%
spotPython tuning: -0.34685534591194966 [#---------] 5.14%
spotPython tuning: -0.34685534591194966 [#---------] 6.62%
spotPython tuning: -0.34685534591194966 [#---------] 8.17%
spotPython tuning: -0.34685534591194966 [#---------] 9.48%
spotPython tuning: -0.34685534591194966 [#---------] 10.74%
spotPython tuning: -0.34685534591194966 [#---------] 12.38%
spotPython tuning: -0.34685534591194966 [#---------] 14.24%
spotPython tuning: -0.34685534591194966 [##--------] 15.78%
spotPython tuning: -0.34685534591194966 [##--------] 17.25%
spotPython tuning: -0.34685534591194966 [##--------] 20.16%
spotPython tuning: -0.34685534591194966 [###-------] 25.54%
spotPython tuning: -0.34685534591194966 [###-------] 31.37%
spotPython tuning: -0.34685534591194966 [####------] 36.62%
spotPython tuning: -0.34685534591194966 [####------] 40.66%
spotPython tuning: -0.34874213836477985 [#####-----] 45.19%
spotPython tuning: -0.35188679245283017 [#####-----] 50.51%
spotPython tuning: -0.35188679245283017 [#####-----] 54.85%
spotPython tuning: -0.3534591194968553 [######----] 61.63%
spotPython tuning: -0.3581761006289308 [#######---] 66.18%
spotPython tuning: -0.3581761006289308 [#######---] 72.06%
spotPython tuning: -0.3581761006289308 [########--] 75.71%
spotPython tuning: -0.3581761006289308 [########--] 82.79%
spotPython tuning: -0.3581761006289308 [#########-] 87.87%
spotPython tuning: -0.3581761006289308 [#########-] 93.16%
spotPython tuning: -0.3581761006289308 [##########] 99.47%
spotPython tuning: -0.36163522012578614 [##########] 100.00% Done...
<spotPython.spot.spot.Spot at 0x295b2ffd0>
13.9 Step 9: Tensorboard
The textual output shown in the console (or code cell) can be visualized with Tensorboard as described in Section 12.9, see also the description in the documentation: Tensorboard.
13.10 Step 10: Results
After the hyperparameter tuning run is finished, the progress of the hyperparameter tuning can be visualized. The following code generates the progress plot:

spot_tuner.plot_progress(log_y=False,
                         filename="./figures/" + experiment_name + "_progress.png")
- Print the results
print(gen_design_table(fun_control=fun_control,
                       spot=spot_tuner))
| name | type | default | lower | upper | tuned | transform | importance | stars |
|--------------------------|--------|-----------|---------|---------|----------------------|------------------------|--------------|---------|
| n_estimators | int | 7 | 5.0 | 10.0 | 10.0 | transform_power_2_int | 4.30 | * |
| criterion | factor | gini | 0.0 | 2.0 | 1.0 | None | 5.84 | * |
| max_depth | int | 10 | 1.0 | 20.0 | 10.0 | transform_power_2_int | 13.95 | * |
| min_samples_split | int | 2 | 2.0 | 100.0 | 2.0 | None | 50.82 | ** |
| min_samples_leaf | int | 1 | 1.0 | 25.0 | 1.0 | None | 100.00 | *** |
| min_weight_fraction_leaf | float | 0.0 | 0.0 | 0.01 | 0.008341673749104165 | None | 0.00 | |
| max_features | factor | sqrt | 0.0 | 1.0 | 0.0 | transform_none_to_None | 0.78 | . |
| max_leaf_nodes | int | 10 | 7.0 | 12.0 | 9.0 | transform_power_2_int | 0.00 | |
| min_impurity_decrease | float | 0.0 | 0.0 | 0.01 | 0.01 | None | 0.00 | |
| bootstrap | factor | 1 | 1.0 | 1.0 | 1.0 | None | 0.00 | |
| oob_score | factor | 0 | 1.0 | 1.0 | 1.0 | None | 0.00 | |
13.10.1 Show variable importance
spot_tuner.plot_importance(threshold=0.025,
                           filename="./figures/" + experiment_name + "_importance.png")
13.10.2 Get Default Hyperparameters
from spotPython.hyperparameters.values import get_default_values, transform_hyper_parameter_values
values_default = get_default_values(fun_control)
values_default = transform_hyper_parameter_values(fun_control=fun_control,
                                                  hyper_parameter_values=values_default)
values_default
{'n_estimators': 128,
'criterion': 'gini',
'max_depth': 1024,
'min_samples_split': 2,
'min_samples_leaf': 1,
'min_weight_fraction_leaf': 0.0,
'max_features': 'sqrt',
'max_leaf_nodes': 1024,
'min_impurity_decrease': 0.0,
'bootstrap': 1,
'oob_score': 0}
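The transform_power_2_int entries in the design table explain the transformed defaults shown above: the raw design value x is mapped to 2**x.

```python
# raw design values -> transformed hyperparameter values
print(2**7)   # n_estimators default 7 -> 128
print(2**10)  # max_depth and max_leaf_nodes default 10 -> 1024
```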
from sklearn.pipeline import make_pipeline
model_default = make_pipeline(fun_control["prep_model"], fun_control["core_model"](**values_default))
model_default
Pipeline(steps=[('nonetype', None),
                ('randomforestclassifier',
                 RandomForestClassifier(bootstrap=1, max_depth=1024,
                                        max_leaf_nodes=1024, n_estimators=128,
                                        oob_score=0))])
13.10.3 Get SPOT Results
X = spot_tuner.to_all_dim(spot_tuner.min_X.reshape(1,-1))
print(X)
[[1.00000000e+01 1.00000000e+00 1.00000000e+01 2.00000000e+00
1.00000000e+00 8.34167375e-03 0.00000000e+00 9.00000000e+00
1.00000000e-02 1.00000000e+00 1.00000000e+00]]
from spotPython.hyperparameters.values import assign_values, return_conf_list_from_var_dict
v_dict = assign_values(X, fun_control["var_name"])
return_conf_list_from_var_dict(var_dict=v_dict, fun_control=fun_control)
[{'n_estimators': 1024,
'criterion': 'entropy',
'max_depth': 1024,
'min_samples_split': 2,
'min_samples_leaf': 1,
'min_weight_fraction_leaf': 0.008341673749104165,
'max_features': 'sqrt',
'max_leaf_nodes': 512,
'min_impurity_decrease': 0.01,
'bootstrap': 1,
'oob_score': 1}]
from spotPython.hyperparameters.values import get_one_sklearn_model_from_X
model_spot = get_one_sklearn_model_from_X(X, fun_control)
model_spot
RandomForestClassifier(bootstrap=1, criterion='entropy', max_depth=1024,
                       max_leaf_nodes=512, min_impurity_decrease=0.01,
                       min_weight_fraction_leaf=0.008341673749104165,
                       n_estimators=1024, oob_score=1)
13.10.4 Evaluate SPOT Results
- Fetch the data.
from spotPython.utils.convert import get_Xy_from_df
X_train, y_train = get_Xy_from_df(fun_control["train"], fun_control["target_column"])
X_test, y_test = get_Xy_from_df(fun_control["test"], fun_control["target_column"])
X_test.shape, y_test.shape
((177, 64), (177,))
- Fit the model with the tuned hyperparameters. This gives one result:
model_spot.fit(X_train, y_train)
y_pred = model_spot.predict_proba(X_test)
res = mapk_score(y_true=y_test, y_pred=y_pred, k=3)
res
0.3465160075329567
The following helper function fits a model n times, evaluates the MAP@3 score on the hold-out test set after every fit, and prints summary statistics:

def repeated_eval(n, model):
    res_values = []
    for i in range(n):
        model.fit(X_train, y_train)
        y_pred = model.predict_proba(X_test)
        res = mapk_score(y_true=y_test, y_pred=y_pred, k=3)
        res_values.append(res)
    mean_res = np.mean(res_values)
    print(f"mean_res: {mean_res}")
    std_res = np.std(res_values)
    print(f"std_res: {std_res}")
    min_res = np.min(res_values)
    print(f"min_res: {min_res}")
    max_res = np.max(res_values)
    print(f"max_res: {max_res}")
    median_res = np.median(res_values)
    print(f"median_res: {median_res}")
    return mean_res, std_res, min_res, max_res, median_res
13.10.5 Handling Non-deterministic Results
- Because the model is non-deterministic, we perform \(n=30\) runs and calculate the mean and standard deviation of the performance metric.

_ = repeated_eval(30, model_spot)
mean_res: 0.3571876961707471
std_res: 0.006089463559461536
min_res: 0.34463276836158185
max_res: 0.36911487758945394
median_res: 0.35781544256120523
13.10.6 Evaluation of the Default Hyperparameters

model_default.fit(X_train, y_train)["randomforestclassifier"]
RandomForestClassifier(bootstrap=1, max_depth=1024, max_leaf_nodes=1024,
                       n_estimators=128, oob_score=0)
- One evaluation of the default hyperparameters is performed on the hold-out test set.
y_pred = model_default.predict_proba(X_test)
mapk_score(y_true=y_test, y_pred=y_pred, k=3)
0.3267419962335216
Since one single evaluation is not meaningful, we perform, similar to the evaluation of the SPOT results, \(n=30\) runs of the default setting and calculate the mean and standard deviation of the performance metric.

_ = repeated_eval(30, model_default)
mean_res: 0.34202762084118
std_res: 0.014730019225568549
min_res: 0.3135593220338983
max_res: 0.3757062146892655
median_res: 0.3418079096045198
13.10.7 Plot: Compare Predictions
from spotPython.plot.validation import plot_confusion_matrix
= "Default") plot_confusion_matrix(model_default, fun_control, title
="SPOT") plot_confusion_matrix(model_spot, fun_control, title
The range of the objective-function values observed during tuning can be inspected as follows. The values are negative because the MAP@K score is multiplied by -1 (see Section 13.7.1):

min(spot_tuner.y), max(spot_tuner.y)

(-0.36163522012578614, -0.27578616352201263)
13.10.8 Cross-validated Evaluations
from spotPython.sklearn.traintest import evaluate_cv
fun_control.update({"eval": "train_cv",
"k_folds": 10,
})=model_spot, fun_control=fun_control, verbose=0) evaluate_cv(model
(0.3584905660377358, None)
fun_control.update({"eval": "test_cv",
"k_folds": 10,
})=model_spot, fun_control=fun_control, verbose=0) evaluate_cv(model
(0.3425381263616557, None)
- This is the evaluation that will be used in the comparison:
fun_control.update({"eval": "data_cv",
"k_folds": 10,
})=model_spot, fun_control=fun_control, verbose=0) evaluate_cv(model
(0.36150570087189804, None)
13.10.9 Detailed Hyperparameter Plots
= "./figures/" + experiment_name
filename =filename) spot_tuner.plot_important_hyperparameter_contour(filename
n_estimators: 4.29616772877326
criterion: 5.841472248728804
max_depth: 13.952946964814346
min_samples_split: 50.82400379058885
min_samples_leaf: 100.0
max_features: 0.7804639314403174
13.10.10 Parallel Coordinates Plot
spot_tuner.parallel_plot()
13.10.11 Plot all Combinations of Hyperparameters
- Warning: this may take a while.
PLOT_ALL = False
if PLOT_ALL:
    n = spot_tuner.k
    # assumption: use the observed objective range for the z-axis
    # (min_z and max_z were undefined in the original snippet)
    min_z, max_z = min(spot_tuner.y), max(spot_tuner.y)
    for i in range(n-1):
        for j in range(i+1, n):
            spot_tuner.plot_contour(i=i, j=j, min_z=min_z, max_z=max_z)