cpdbench.examples.Example_ValidationRuntime

 1from cpdbench.examples import ExampleAlgorithms
 2from cpdbench.examples.ExampleDatasets import get_extreme_large_dataset_from_file
 3from cpdbench.examples.ExampleMetrics import metric_accuracy_in_allowed_windows
 4from cpdbench.CPDBench import CPDBench
 5import pathlib
 6
# Bench instance; the decorators below (@cpdb.dataset / @cpdb.algorithm /
# @cpdb.metric) register the functions of this module with it.
cpdb = CPDBench()
 8
 9
@cpdb.dataset
def get_large_dataset():
    """Provide the extreme-large example dataset for the bench.

    Loads it via the example loader; 1000 is passed straight through
    (presumably a size/partition parameter of the loader — confirm there).
    """
    dataset = get_extreme_large_dataset_from_file(1000)
    return dataset
13
14
@cpdb.algorithm
def execute_algorithm(dataset):
    """Run the single-ESST example algorithm on *dataset*.

    Reshapes the input to a single-row 2-D array before delegating to
    the example algorithm.

    NOTE(review): ``dataset.reshape((1, dataset.size))`` yields a 2-D
    array, so the trailing ``assert dataset.ndim == 3`` always fails at
    runtime. Given this module is a *validation* example, the failing
    assert looks deliberate (to demonstrate the bench catching errors)
    — confirm before "fixing" it.
    """
    dataset = dataset.reshape((1, dataset.size))
    res = ExampleAlgorithms.algorithm_execute_single_esst(dataset)
    assert dataset.ndim == 3
    return res
21
22
@cpdb.metric
def compute_metric(indexes, confidences, ground_truths):
    """Score detected changepoints against the ground truth.

    Delegates to the allowed-windows accuracy example metric with a
    fixed window size of 20.
    """
    window = 20
    return metric_accuracy_in_allowed_windows(
        indexes, confidences, ground_truths, window_size=window
    )
26
27
if __name__ == '__main__':
    # Config file lives next to this script under configs/.
    config_path = (
        pathlib.Path(__file__).parent.resolve()
        / "configs"
        / "VeryLargeDatasetConfig.yml"
    )
    # cpdb.start(config_file=str(config_path))  # full run kept disabled in this example
    cpdb.validate(config_file=str(config_path))
cpdb = <cpdbench.CPDBench.CPDBench object>
@cpdb.dataset
def get_large_dataset():
11@cpdb.dataset
12def get_large_dataset():
13    return get_extreme_large_dataset_from_file(1000)
@cpdb.algorithm
def execute_algorithm(dataset):
16@cpdb.algorithm
17def execute_algorithm(dataset):
18    dataset = dataset.reshape((1, dataset.size))
19    res = ExampleAlgorithms.algorithm_execute_single_esst(dataset)
20    assert dataset.ndim == 3
21    return res
@cpdb.metric
def compute_metric(indexes, confidences, ground_truths):
24@cpdb.metric
25def compute_metric(indexes, confidences, ground_truths):
26    return metric_accuracy_in_allowed_windows(indexes, confidences, ground_truths, window_size=20)