-
Notifications
You must be signed in to change notification settings - Fork 7
/
Copy pathclassifier_parameter.py
82 lines (65 loc) · 2.37 KB
/
classifier_parameter.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Finding Best Hyper Parameter for Classifier
=======================================================

In this example, we try to find the best hyper parameter of Classifier
(`method` and `regularization_weight`) by calculating accuracy for
possible hyper parameter values.

Datasets are randomly generated by using scikit-learn data generator.
"""
# NOTE: the docstring must precede the __future__ import; a __future__
# import may only be preceded by the module docstring and comments, and
# placing the docstring after it would leave __doc__ unset.
from __future__ import absolute_import, division, print_function, unicode_literals

import sklearn.datasets
import sklearn.metrics

from jubakit.classifier import Classifier, Dataset, Config

# Generate a dummy 2-class dataset using scikit-learn.
(X, y) = sklearn.datasets.make_classification(
  n_samples=512,
  n_features=20,
  n_informative=2,
  n_redundant=2,
  n_repeated=0,
  n_classes=2,
  n_clusters_per_class=2,
  weights=None,
  flip_y=0.01,
  class_sep=1.0,
  hypercube=True,
  shift=0.0,
  scale=1.0,
  shuffle=True,
  random_state=0,  # fixed seed so every run sees the same data
)

# Convert arrays into jubakit Dataset.
dataset = Dataset.from_array(X, y)

# Try every (method, regularization_weight) combination and record its accuracy.
param2metrics = {}
for method in ['AROW', 'NHERD', 'CW']:
  for rw in [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0]:
    print('Running ({0} / regularization_weight = {1})...'.format(method, rw))

    # Create a config data structure.
    jubatus_config = Config(method=method, parameter={'regularization_weight': rw})
    # It is equivalent to:
    #   jubatus_config = Config.default()
    #   jubatus_config['method'] = method
    #   jubatus_config['parameter']['regularization_weight'] = rw

    # Launch Jubatus server using the specified configuration.
    classifier = Classifier.run(jubatus_config)
    try:
      # Train with the dataset; train() is a generator, so drain it.
      for _ in classifier.train(dataset):
        pass

      # Classify the same dataset and collect true/predicted labels.
      y_true = []
      y_pred = []
      for (idx, label, result) in classifier.classify(dataset):
        y_true.append(label)
        # result[0][0]: label of the first classification candidate
        # (presumably the highest-scored one — TODO confirm against jubakit docs).
        y_pred.append(result[0][0])
    finally:
      # Always shut down the launched Jubatus server, even if
      # train/classify raises, so no server process leaks.
      classifier.stop()

    # Store the metric for the current configuration.
    param2metrics['{0} ({1})'.format(method, rw)] = \
        sklearn.metrics.accuracy_score(y_true, y_pred)

# Show results for each hyper parameter; the best one is marked with `*`.
best_param = max(param2metrics, key=param2metrics.get)
print('--------------------')
print('Configuration\tAccuracy')
for name in sorted(param2metrics):
  print('{0}\t{1}\t{2}'.format(name, param2metrics[name], '*' if name == best_param else ''))