# 15 HPT: sklearn SVC VBDP Data

`spotPython` can be installed via pip. Alternatively, the source code can be downloaded from GitHub: https://github.com/sequential-parameter-optimization/spotPython.

```python
!pip install spotPython
```

Uncomment the following lines if you want to (re-)install the latest version of `spotPython` from GitHub:

```python
# import sys
# !{sys.executable} -m pip install --upgrade build
# !{sys.executable} -m pip install --upgrade --force-reinstall spotPython
```
## 15.1 Step 1: Setup
Before we consider the detailed experimental setup, we select the parameters that affect run time and the initial design size.
```python
MAX_TIME = 1
INIT_SIZE = 5
ORIGINAL = False
PREFIX = "18"
```

```python
import warnings
warnings.filterwarnings("ignore")
```
## 15.2 Step 2: Initialization of the Empty fun_control Dictionary
```python
from spotPython.utils.init import fun_control_init
from spotPython.utils.file import get_experiment_name, get_spot_tensorboard_path
from spotPython.utils.device import getDevice

experiment_name = get_experiment_name(prefix=PREFIX)

fun_control = fun_control_init(
    task="classification",
    spot_tensorboard_path=get_spot_tensorboard_path(experiment_name))
```
## 15.3 Step 3: Data Loading

### 15.3.1 Load Data: Classification VBDP
```python
import pandas as pd

if ORIGINAL == True:
    train_df = pd.read_csv('./data/VBDP/trainn.csv')
    test_df = pd.read_csv('./data/VBDP/testt.csv')
else:
    train_df = pd.read_csv('./data/VBDP/train.csv')
    # remove the id column
    train_df = train_df.drop(columns=['id'])
```
```python
from sklearn.preprocessing import OrdinalEncoder

n_samples = train_df.shape[0]
n_features = train_df.shape[1] - 1
target_column = "prognosis"
# Encode our prognosis labels as integers for easier decoding later
enc = OrdinalEncoder()
train_df[target_column] = enc.fit_transform(train_df[[target_column]])
train_df.columns = [f"x{i}" for i in range(1, n_features+1)] + [target_column]
print(train_df.shape)
train_df.head()
```
```
(707, 65)
```
| | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | ... | x56 | x57 | x58 | x59 | x60 | x61 | x62 | x63 | x64 | prognosis |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 3.0 |
1 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 1.0 | 0.0 | 1.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 7.0 |
2 | 0.0 | 1.0 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | ... | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 | 1.0 | 3.0 |
3 | 0.0 | 0.0 | 1.0 | 1.0 | 1.0 | 1.0 | 0.0 | 1.0 | 0.0 | 1.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 10.0 |
4 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 1.0 | 0.0 | ... | 0.0 | 1.0 | 0.0 | 0.0 | 1.0 | 1.0 | 1.0 | 0.0 | 0.0 | 6.0 |
5 rows × 65 columns
The full data set `train_df` has 64 features. The target column is labeled `prognosis`.
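As a quick sanity check (a sketch added here, not part of the original analysis), the distribution of the encoded target classes can be inspected:

```python
# Sketch: frequencies of the encoded prognosis classes in the full data set.
print(train_df[target_column].value_counts())
```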
### 15.3.2 Holdout Train and Test Data
We split out a hold-out test set (25% of the data) so we can calculate an example MAP@K score later.
```python
import numpy as np
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(train_df.drop(target_column, axis=1),
                                                    train_df[target_column],
                                                    random_state=42,
                                                    test_size=0.25,
                                                    stratify=train_df[target_column])
train = pd.DataFrame(np.hstack((X_train, np.array(y_train).reshape(-1, 1))))
test = pd.DataFrame(np.hstack((X_test, np.array(y_test).reshape(-1, 1))))
train.columns = [f"x{i}" for i in range(1, n_features+1)] + [target_column]
test.columns = [f"x{i}" for i in range(1, n_features+1)] + [target_column]
print(train.shape)
print(test.shape)
train.head()
```
```
(530, 65)
(177, 65)
```
| | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | ... | x56 | x57 | x58 | x59 | x60 | x61 | x62 | x63 | x64 | prognosis |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 1.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 2.0 |
1 | 0.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 4.0 |
2 | 0.0 | 0.0 | 0.0 | 1.0 | 1.0 | 1.0 | 0.0 | 0.0 | 0.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 1.0 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 |
3 | 1.0 | 1.0 | 0.0 | 1.0 | 1.0 | 1.0 | 0.0 | 0.0 | 0.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 6.0 |
4 | 0.0 | 0.0 | 0.0 | 1.0 | 0.0 | 0.0 | 1.0 | 1.0 | 0.0 | 0.0 | ... | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 5.0 |
5 rows × 65 columns
```python
# add the dataset to the fun_control
fun_control.update({"data": train_df,  # full dataset
                    "train": train,
                    "test": test,
                    "n_samples": n_samples,
                    "target_column": target_column})
```
## 15.4 Step 4: Specification of the Preprocessing Model

Data preprocessing can be very simple, e.g., you can ignore it. In that case you would choose the `prep_model` `None`:

```python
prep_model = None
fun_control.update({"prep_model": prep_model})
```
A default approach for numerical data is the `StandardScaler` (mean 0, variance 1). This can be selected as follows:

```python
# from sklearn.preprocessing import StandardScaler
# prep_model = StandardScaler()
# fun_control.update({"prep_model": prep_model})
```
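As a quick illustration (a minimal sketch with made-up numbers, not part of the original pipeline), the scaler maps each column to zero mean and unit variance:

```python
# Sketch: StandardScaler standardizes each column to mean 0 and variance 1.
import numpy as np
from sklearn.preprocessing import StandardScaler

Z = StandardScaler().fit_transform(np.array([[1.0], [2.0], [3.0]]))
print(Z.mean(axis=0), Z.std(axis=0))  # approximately [0.] and [1.]
```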
Even more complicated preprocessing steps are possible, e.g., the following pipeline:

```python
# from sklearn.compose import ColumnTransformer
# from sklearn.preprocessing import OneHotEncoder, StandardScaler
# categorical_columns = []
# one_hot_encoder = OneHotEncoder(handle_unknown="ignore", sparse_output=False)
# prep_model = ColumnTransformer(
#     transformers=[
#         ("categorical", one_hot_encoder, categorical_columns),
#     ],
#     remainder=StandardScaler(),
# )
```
## 15.5 Step 5: Select Model (algorithm) and core_model_hyper_dict

The selection of the algorithm (ML model) that should be tuned is done by specifying its name from the `sklearn` implementation. For example, the `SVC` support vector machine classifier is selected as follows:

```python
add_core_model_to_fun_control(SVC, fun_control, SklearnHyperDict)
```
Other `core_model`s are, e.g.:

- RidgeCV
- GradientBoostingRegressor
- ElasticNet
- RandomForestClassifier
- LogisticRegression
- KNeighborsClassifier
- GradientBoostingClassifier
- HistGradientBoostingClassifier

We will use the `SVC` classifier in this example.
```python
from sklearn.linear_model import RidgeCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.linear_model import ElasticNet
from spotPython.hyperparameters.values import add_core_model_to_fun_control
from spotPython.data.sklearn_hyper_dict import SklearnHyperDict
from spotPython.fun.hypersklearn import HyperSklearn

# core_model = RidgeCV
# core_model = GradientBoostingRegressor
# core_model = ElasticNet
# core_model = RandomForestClassifier
core_model = SVC
# core_model = LogisticRegression
# core_model = KNeighborsClassifier
# core_model = GradientBoostingClassifier
# core_model = HistGradientBoostingClassifier

add_core_model_to_fun_control(core_model=core_model,
                              fun_control=fun_control,
                              hyper_dict=SklearnHyperDict,
                              filename=None)
```
Now `fun_control` has the information from the JSON file. The available hyperparameters are:

```python
print(*fun_control["core_model_hyper_dict"].keys(), sep="\n")
```

```
C
kernel
degree
gamma
coef0
shrinking
probability
tol
cache_size
break_ties
```
## 15.6 Step 6: Modify hyper_dict Hyperparameters for the Selected Algorithm (aka core_model)

### 15.6.1 Modify Hyperparameters of Type numeric and integer (boolean)
Numeric and boolean values can be modified using the `modify_hyper_parameter_bounds` method. For example, to change the `tol` hyperparameter of the `SVC` model to the interval [1e-3, 1e-2], the following code can be used:

```python
modify_hyper_parameter_bounds(fun_control, "tol", bounds=[1e-3, 1e-2])
```

Here, we fix the `probability` hyperparameter to 1 (True):

```python
from spotPython.hyperparameters.values import modify_hyper_parameter_bounds
modify_hyper_parameter_bounds(fun_control, "probability", bounds=[1, 1])
```
### 15.6.2 Modify Hyperparameters of Type factor

`spotPython` provides functions for modifying the hyperparameters, their bounds and factors, as well as for activating and de-activating hyperparameters without re-compilation of the Python source code. These functions were described in Section 12.6.

Factors can be modified with the `modify_hyper_parameter_levels` function. For example, to exclude the `sigmoid` kernel from the tuning, the `kernel` hyperparameter of the `SVC` model can be modified as follows:

```python
modify_hyper_parameter_levels(fun_control, "kernel", ["linear", "rbf"])
```

The new setting can be controlled via:

```python
fun_control["core_model_hyper_dict"]["kernel"]
```

Here, we restrict the tuning to the `rbf` kernel:

```python
from spotPython.hyperparameters.values import modify_hyper_parameter_levels
modify_hyper_parameter_levels(fun_control, "kernel", ["rbf"])
```
### 15.6.3 Optimizers
Optimizers are described in Section 12.6.1.
### 15.6.4 Selection of the Objective: Metric and Loss Functions

- Machine learning models are optimized with respect to a metric, for example, the `accuracy` function.
- Deep learning models, e.g., neural networks, are optimized with respect to a loss function, for example, the `cross_entropy` function, and evaluated with respect to a metric, for example, the `accuracy` function.
## 15.7 Step 7: Selection of the Objective (Loss) Function

The loss function, which is usually used in deep learning for optimizing the weights of the net, is stored in the `fun_control` dictionary as `"loss_function"`.
### 15.7.1 Metric Function

There are two different types of metrics in `spotPython`:

- `"metric_river"` is used for the river-based evaluation via `eval_oml_iter_progressive`.
- `"metric_sklearn"` is used for the sklearn-based evaluation.

We will consider multi-class classification metrics, e.g., `mapk_score` and `top_k_accuracy_score`.
In this multi-class classification example the machine learning algorithm should return the probabilities of the specific classes (`"predict_proba"`) instead of the predicted values. We therefore set `"predict_proba"` to `True` in the `fun_control` dictionary.
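The difference matters because ranking-based metrics such as MAP@K need one probability per class. A minimal sketch on synthetic toy data (not the VBDP data) illustrates the two output shapes:

```python
# Sketch on synthetic data: predict returns hard labels, predict_proba
# returns one probability per class (required for ranking-based metrics).
import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import SVC

X_toy, y_toy = make_classification(n_samples=40, random_state=0)
clf = SVC(probability=True).fit(X_toy, y_toy)
print(clf.predict(X_toy[:2]))        # hard class labels, e.g. [0 1]
print(clf.predict_proba(X_toy[:2]))  # shape (2, n_classes) -- class probabilities
```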
#### 15.7.1.1 The MAPK Metric

To select the MAPK metric, the following two entries can be added to the `fun_control` dictionary:

- `"metric_sklearn": mapk_score`
- `"metric_params": {"k": 3}`
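For intuition: with a single true class per sample, MAP@K reduces to the mean reciprocal rank of the true class within the top k predictions. The following is a minimal sketch of that idea with made-up numbers, not `spotPython`'s `mapk_score` implementation:

```python
import numpy as np

# Sketch of the MAP@K idea: average 1/rank of the true class among the
# k highest-probability predictions (0 if the true class is not in the top k).
def mapk_sketch(y_true, y_proba, k=3):
    scores = []
    for truth, proba in zip(y_true, y_proba):
        top_k = np.argsort(proba)[::-1][:k]   # class indices, best first
        hits = np.where(top_k == truth)[0]
        scores.append(1.0 / (hits[0] + 1) if hits.size else 0.0)
    return float(np.mean(scores))

# toy example: two samples, three classes
y_true = np.array([0, 2])
y_proba = np.array([[0.7, 0.2, 0.1],   # true class ranked 1st -> 1
                    [0.5, 0.3, 0.2]])  # true class ranked 3rd -> 1/3
print(mapk_sketch(y_true, y_proba, k=3))  # (1 + 1/3) / 2 = 0.667
```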
#### 15.7.1.2 Other Metrics

Alternatively, other metrics for multi-class classification can be used, e.g.:

- `top_k_accuracy_score` or
- `roc_auc_score`

The metric `roc_auc_score` requires the parameter `"multi_class"`, e.g., `"multi_class": "ovr"`. This is set in the `fun_control` dictionary.
`spotPython` performs a minimization; therefore, metrics that should be maximized have to be multiplied by -1. This is done by setting `"weights"` to `-1`.
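As a short sanity check of this sign convention (a sketch with made-up numbers; that the metric is simply multiplied by `"weights"` is an assumption, consistent with the negative objective values in the tuning log below):

```python
# Sketch: with weights = -1, a larger MAP@3 value yields a smaller
# (i.e., better, under minimization) objective value.
weights = -1
print(weights * 0.38, weights * 0.39)  # -0.38 > -0.39, so MAP@3 = 0.39 wins
```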
- The complete setup for the metric in our example is:

```python
from spotPython.utils.metrics import mapk_score

fun_control.update({"weights": -1,
                    "metric_sklearn": mapk_score,
                    "predict_proba": True,
                    "metric_params": {"k": 3},
                    })
```
### 15.7.2 Evaluation on Hold-out Data

- The default method for computing the performance is `"eval_holdout"`.
- Alternatively, cross-validation can be used for every machine learning model.
- Specifically for RandomForests, the OOB score can be used.

```python
fun_control.update({"eval": "train_hold_out",
                    })
```
#### 15.7.2.1 Cross Validation

Instead of using the OOB score, classical cross validation can be used. The number of folds is set by the key `"k_folds"`. For example, to use 5-fold cross validation, the key `"k_folds"` is set to `5`. Uncomment the following lines to use cross validation:

```python
# fun_control.update({
#     "eval": "train_cv",
#     "k_folds": 10,
# })
```
## 15.8 Step 8: Calling the SPOT Function

### 15.8.1 Preparing the SPOT Call

- Get types and variable names as well as lower and upper bounds for the hyperparameters.

```python
# extract the variable types, names, and bounds
from spotPython.hyperparameters.values import (get_bound_values,
                                               get_var_name,
                                               get_var_type,)

var_type = get_var_type(fun_control)
var_name = get_var_name(fun_control)
lower = get_bound_values(fun_control, "lower")
upper = get_bound_values(fun_control, "upper")
```
```python
from spotPython.utils.eda import gen_design_table
print(gen_design_table(fun_control))
```

```
| name        | type   | default   |    lower |   upper | transform   |
|-------------|--------|-----------|----------|---------|-------------|
| C           | float  | 1.0       |   0.1    |      10 | None        |
| kernel      | factor | rbf       |   0      |       0 | None        |
| degree      | int    | 3         |   3      |       3 | None        |
| gamma       | factor | scale     |   0      |       1 | None        |
| coef0       | float  | 0.0       |   0      |       0 | None        |
| shrinking   | factor | 0         |   0      |       1 | None        |
| probability | factor | 0         |   1      |       1 | None        |
| tol         | float  | 0.001     |   0.0001 |    0.01 | None        |
| cache_size  | float  | 200.0     | 100      |     400 | None        |
| break_ties  | factor | 0         |   0      |       1 | None        |
```
### 15.8.2 The Objective Function

The objective function is selected next. It implements an interface from `sklearn`'s training, validation, and testing methods to `spotPython`.

```python
from spotPython.fun.hypersklearn import HyperSklearn
fun = HyperSklearn().fun_sklearn
```
### 15.8.3 Run the Spot Optimizer

- Run SPOT for approximately `MAX_TIME` minutes.
- Note: the run takes longer, because the evaluation time of the initial design (here: `INIT_SIZE` points) is not considered.
```python
from spotPython.hyperparameters.values import get_default_hyperparameters_as_array
X_start = get_default_hyperparameters_as_array(fun_control)
X_start
```

```
array([[1.e+00, 0.e+00, 3.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e-03,
        2.e+02, 0.e+00]])
```
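Before starting the tuner, one might smoke-test the objective at this default configuration. This is left commented out because the exact call signature of `fun_sklearn` (a 2D array of hyperparameter values plus the `fun_control` dictionary) is an assumption here:

```python
# Sketch (assumed signature; uncomment to try):
# y_start = fun(X_start, fun_control=fun_control)
# print(y_start)  # expected: one objective value (negative MAP@3) per row of X_start
```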
```python
import numpy as np
from spotPython.spot import spot
from math import inf

spot_tuner = spot.Spot(fun=fun,
                       lower=lower,
                       upper=upper,
                       fun_evals=inf,
                       fun_repeats=1,
                       max_time=MAX_TIME,
                       noise=False,
                       tolerance_x=np.sqrt(np.spacing(1)),
                       var_type=var_type,
                       var_name=var_name,
                       infill_criterion="y",
                       n_points=1,
                       seed=123,
                       log_level=50,
                       show_models=False,
                       show_progress=True,
                       fun_control=fun_control,
                       design_control={"init_size": INIT_SIZE,
                                       "repeats": 1},
                       surrogate_control={"noise": True,
                                          "cod_type": "norm",
                                          "min_theta": -4,
                                          "max_theta": 3,
                                          "n_theta": len(var_name),
                                          "model_fun_evals": 10_000,
                                          "log_level": 50
                                          })
spot_tuner.run(X_start=X_start)
```
```
spotPython tuning: -0.38345864661654133 [----------] 0.39%
spotPython tuning: -0.38345864661654133 [----------] 0.75%
spotPython tuning: -0.38345864661654133 [----------] 1.10%
spotPython tuning: -0.38345864661654133 [----------] 1.41%
spotPython tuning: -0.38345864661654133 [----------] 2.01%
spotPython tuning: -0.38345864661654133 [----------] 2.48%
spotPython tuning: -0.38345864661654133 [----------] 2.96%
spotPython tuning: -0.38345864661654133 [----------] 3.37%
spotPython tuning: -0.38345864661654133 [----------] 3.89%
spotPython tuning: -0.38345864661654133 [----------] 4.32%
spotPython tuning: -0.38345864661654133 [----------] 4.81%
spotPython tuning: -0.38345864661654133 [#---------] 5.23%
spotPython tuning: -0.38345864661654133 [#---------] 5.91%
spotPython tuning: -0.38345864661654133 [#---------] 6.82%
spotPython tuning: -0.38345864661654133 [#---------] 8.72%
spotPython tuning: -0.38345864661654133 [#---------] 9.50%
spotPython tuning: -0.38345864661654133 [#---------] 10.49%
spotPython tuning: -0.38345864661654133 [#---------] 11.46%
spotPython tuning: -0.38345864661654133 [#---------] 12.29%
spotPython tuning: -0.38345864661654133 [#---------] 13.03%
spotPython tuning: -0.38721804511278196 [#---------] 13.90%
spotPython tuning: -0.38721804511278196 [#---------] 14.81%
spotPython tuning: -0.38721804511278196 [##--------] 16.02%
spotPython tuning: -0.38721804511278196 [##--------] 18.17%
spotPython tuning: -0.38721804511278196 [##--------] 19.23%
spotPython tuning: -0.38721804511278196 [##--------] 20.33%
spotPython tuning: -0.38721804511278196 [##--------] 21.43%
spotPython tuning: -0.38721804511278196 [##--------] 22.23%
spotPython tuning: -0.38721804511278196 [##--------] 23.46%
spotPython tuning: -0.38721804511278196 [##--------] 24.64%
spotPython tuning: -0.38721804511278196 [###-------] 25.92%
spotPython tuning: -0.38721804511278196 [###-------] 28.12%
spotPython tuning: -0.38721804511278196 [###-------] 29.30%
spotPython tuning: -0.38721804511278196 [###-------] 30.81%
spotPython tuning: -0.39348370927318294 [###-------] 32.51%
spotPython tuning: -0.39348370927318294 [###-------] 33.77%
spotPython tuning: -0.39348370927318294 [####------] 35.20%
spotPython tuning: -0.39348370927318294 [####------] 37.48%
spotPython tuning: -0.39348370927318294 [####------] 39.09%
spotPython tuning: -0.39348370927318294 [####------] 40.65%
spotPython tuning: -0.39348370927318294 [####------] 42.30%
spotPython tuning: -0.39348370927318294 [####------] 44.03%
spotPython tuning: -0.39348370927318294 [#####-----] 46.64%
spotPython tuning: -0.39348370927318294 [#####-----] 48.14%
spotPython tuning: -0.39348370927318294 [#####-----] 49.77%
spotPython tuning: -0.39348370927318294 [#####-----] 51.43%
spotPython tuning: -0.39348370927318294 [#####-----] 52.98%
spotPython tuning: -0.39348370927318294 [#####-----] 54.49%
spotPython tuning: -0.39348370927318294 [######----] 56.70%
spotPython tuning: -0.39348370927318294 [######----] 58.54%
spotPython tuning: -0.39348370927318294 [######----] 60.42%
spotPython tuning: -0.39348370927318294 [######----] 62.53%
spotPython tuning: -0.39348370927318294 [#######---] 65.59%
spotPython tuning: -0.39348370927318294 [#######---] 67.91%
spotPython tuning: -0.39348370927318294 [#######---] 70.51%
spotPython tuning: -0.39348370927318294 [#######---] 72.67%
spotPython tuning: -0.39348370927318294 [########--] 76.08%
spotPython tuning: -0.39348370927318294 [########--] 78.51%
spotPython tuning: -0.39348370927318294 [##########] 100.00% Done...

<spotPython.spot.spot.Spot at 0x2c1e03af0>
```
## 15.9 Step 9: Tensorboard
The textual output shown in the console (or code cell) can be visualized with Tensorboard as described in Section 12.9, see also the description in the documentation: Tensorboard.
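In a notebook, TensorBoard can be launched with the Jupyter extension; a minimal sketch (assuming the logs were written below the default `runs/` directory used by `get_spot_tensorboard_path`):

```python
# Sketch: launch TensorBoard from a notebook cell (uncomment to run).
# %load_ext tensorboard
# %tensorboard --logdir runs
```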
## 15.10 Step 10: Results

After the hyperparameter tuning run is finished, the progress of the hyperparameter tuning can be visualized. The following code generates the progress plot.

```python
spot_tuner.plot_progress(log_y=False,
                         filename="./figures/" + experiment_name + "_progress.png")
```
- Print the results:

```python
print(gen_design_table(fun_control=fun_control,
                       spot=spot_tuner))
```

```
| name        | type   | default   |   lower |   upper | tuned                | transform   |   importance | stars   |
|-------------|--------|-----------|---------|---------|----------------------|-------------|--------------|---------|
| C           | float  | 1.0       |  0.1    |   10.0  | 5.064760042488824    | None        |         1.18 | *       |
| kernel      | factor | rbf       |  0.0    |    0.0  | 0.0                  | None        |         0.00 |         |
| degree      | int    | 3         |  3.0    |    3.0  | 3.0                  | None        |         0.00 |         |
| gamma       | factor | scale     |  0.0    |    1.0  | 1.0                  | None        |       100.00 | ***     |
| coef0       | float  | 0.0       |  0.0    |    0.0  | 0.0                  | None        |         0.00 |         |
| shrinking   | factor | 0         |  0.0    |    1.0  | 0.0                  | None        |         7.81 | *       |
| probability | factor | 0         |  1.0    |    1.0  | 1.0                  | None        |         0.00 |         |
| tol         | float  | 0.001     |  0.0001 |    0.01 | 0.009905536500848609 | None        |         0.10 |         |
| cache_size  | float  | 200.0     |  100.0  |  400.0  | 136.5974675903489    | None        |         0.00 |         |
| break_ties  | factor | 0         |  0.0    |    1.0  | 1.0                  | None        |         0.00 |         |
```
### 15.10.1 Show Variable Importance

```python
spot_tuner.plot_importance(threshold=0.025, filename="./figures/" + experiment_name + "_importance.png")
```
### 15.10.2 Get Default Hyperparameters

```python
from spotPython.hyperparameters.values import get_default_values, transform_hyper_parameter_values
values_default = get_default_values(fun_control)
values_default = transform_hyper_parameter_values(fun_control=fun_control, hyper_parameter_values=values_default)
values_default
```

```
{'C': 1.0,
 'kernel': 'rbf',
 'degree': 3,
 'gamma': 'scale',
 'coef0': 0.0,
 'shrinking': 0,
 'probability': 0,
 'tol': 0.001,
 'cache_size': 200.0,
 'break_ties': 0}
```
```python
from sklearn.pipeline import make_pipeline
model_default = make_pipeline(fun_control["prep_model"], fun_control["core_model"](**values_default))
model_default
```

```
Pipeline(steps=[('nonetype', None),
                ('svc', SVC(break_ties=0, cache_size=200.0, probability=0, shrinking=0))])
```
- The default value for `probability` is `False`, but we need it to be `True` for the metric `mapk_score`:

```python
values_default.update({"probability": 1})
```
### 15.10.3 Get SPOT Results

```python
X = spot_tuner.to_all_dim(spot_tuner.min_X.reshape(1,-1))
print(X)
```

```
[[5.06476004e+00 0.00000000e+00 3.00000000e+00 1.00000000e+00
  0.00000000e+00 0.00000000e+00 1.00000000e+00 9.90553650e-03
  1.36597468e+02 1.00000000e+00]]
```
```python
from spotPython.hyperparameters.values import assign_values, return_conf_list_from_var_dict
v_dict = assign_values(X, fun_control["var_name"])
return_conf_list_from_var_dict(var_dict=v_dict, fun_control=fun_control)
```

```
[{'C': 5.064760042488824,
  'kernel': 'rbf',
  'degree': 3,
  'gamma': 'auto',
  'coef0': 0.0,
  'shrinking': 0,
  'probability': 1,
  'tol': 0.009905536500848609,
  'cache_size': 136.5974675903489,
  'break_ties': 1}]
```
```python
from spotPython.hyperparameters.values import get_one_sklearn_model_from_X
model_spot = get_one_sklearn_model_from_X(X, fun_control)
model_spot
```

```
SVC(C=5.064760042488824, break_ties=1, cache_size=136.5974675903489, gamma='auto',
    probability=1, shrinking=0, tol=0.009905536500848609)
```
### 15.10.4 Evaluate SPOT Results

- Fetch the data.

```python
from spotPython.utils.convert import get_Xy_from_df
X_train, y_train = get_Xy_from_df(fun_control["train"], fun_control["target_column"])
X_test, y_test = get_Xy_from_df(fun_control["test"], fun_control["target_column"])
X_test.shape, y_test.shape
```

```
((177, 64), (177,))
```
- Fit the model with the tuned hyperparameters. This gives one result:

```python
model_spot.fit(X_train, y_train)
y_pred = model_spot.predict_proba(X_test)
res = mapk_score(y_true=y_test, y_pred=y_pred, k=3)
res
```

```
0.3691148775894538
```
```python
def repeated_eval(n, model):
    res_values = []
    for i in range(n):
        model.fit(X_train, y_train)
        y_pred = model.predict_proba(X_test)
        res = mapk_score(y_true=y_test, y_pred=y_pred, k=3)
        res_values.append(res)
    mean_res = np.mean(res_values)
    print(f"mean_res: {mean_res}")
    std_res = np.std(res_values)
    print(f"std_res: {std_res}")
    min_res = np.min(res_values)
    print(f"min_res: {min_res}")
    max_res = np.max(res_values)
    print(f"max_res: {max_res}")
    median_res = np.median(res_values)
    print(f"median_res: {median_res}")
    return mean_res, std_res, min_res, max_res, median_res
```
### 15.10.5 Handling Non-deterministic Results

- Because the model is non-deterministic, we perform \(n=30\) runs and calculate the mean and standard deviation of the performance metric.

```python
_ = repeated_eval(30, model_spot)
```

```
mean_res: 0.37046453232893917
std_res: 0.004799386985354715
min_res: 0.36252354048964214
max_res: 0.3804143126177025
median_res: 0.37052730696798486
```
### 15.10.6 Evaluation of the Default Hyperparameters

```python
model_default["svc"].probability = True
model_default.fit(X_train, y_train)["svc"]
```

```
SVC(break_ties=0, cache_size=200.0, probability=True, shrinking=0)
```
- One evaluation of the default hyperparameters is performed on the hold-out test set.

```python
y_pred = model_default.predict_proba(X_test)
mapk_score(y_true=y_test, y_pred=y_pred, k=3)
```

```
0.38888888888888884
```
Since one single evaluation is not meaningful, we perform, similar to the evaluation of the SPOT results, \(n=30\) runs of the default setting and calculate the mean and standard deviation of the performance metric.

```python
_ = repeated_eval(30, model_default)
```

```
mean_res: 0.3848399246704331
std_res: 0.005316330033775537
min_res: 0.37476459510357824
max_res: 0.396421845574388
median_res: 0.3851224105461394
```
### 15.10.7 Plot: Compare Predictions

```python
from spotPython.plot.validation import plot_confusion_matrix
plot_confusion_matrix(model_default, fun_control, title="Default")
```

```python
plot_confusion_matrix(model_spot, fun_control, title="SPOT")
```

```python
min(spot_tuner.y), max(spot_tuner.y)
```

```
(-0.39348370927318294, -0.33583959899749366)
```
### 15.10.8 Cross-validated Evaluations

```python
from spotPython.sklearn.traintest import evaluate_cv
fun_control.update({"eval": "train_cv",
                    "k_folds": 10,
                    })
evaluate_cv(model=model_spot, fun_control=fun_control, verbose=0)
```

```
(0.34968553459119495, None)
```

```python
fun_control.update({"eval": "test_cv",
                    "k_folds": 10,
                    })
evaluate_cv(model=model_spot, fun_control=fun_control, verbose=0)
```

```
(0.37461873638344223, None)
```

- This is the evaluation that will be used in the comparison:

```python
fun_control.update({"eval": "data_cv",
                    "k_folds": 10,
                    })
evaluate_cv(model=model_spot, fun_control=fun_control, verbose=0)
```

```
(0.3605633802816901, None)
```
### 15.10.9 Detailed Hyperparameter Plots

```python
filename = "./figures/" + experiment_name
spot_tuner.plot_important_hyperparameter_contour(filename=filename)
```

```
C: 1.1752619190262712
gamma: 100.0
shrinking: 7.814829945476416
tol: 0.09592199120674225
```
### 15.10.10 Parallel Coordinates Plot

```python
spot_tuner.parallel_plot()
```
### 15.10.11 Plot all Combinations of Hyperparameters

- Warning: this may take a while.

```python
PLOT_ALL = False
if PLOT_ALL:
    # z-axis bounds, taken from the observed objective values above
    min_z = min(spot_tuner.y)
    max_z = max(spot_tuner.y)
    n = spot_tuner.k
    for i in range(n-1):
        for j in range(i+1, n):
            spot_tuner.plot_contour(i=i, j=j, min_z=min_z, max_z=max_z)
```