@@ -10,8 +10,6 @@ Tutorial `~adaptive.SKOptLearner`
    The complete source code of this tutorial can be found in
    :jupyter-download:notebook:`tutorial.SKOptLearner`

-.. thebe-button:: Run the code live inside the documentation!
-
.. jupyter-execute::
    :hide-code:

@@ -10,6 +10,8 @@ Tutorial `~adaptive.SKOptLearner`
    The complete source code of this tutorial can be found in
    :jupyter-download:notebook:`tutorial.SKOptLearner`

+.. thebe-button:: Run the code live inside the documentation!
+
.. jupyter-execute::
    :hide-code:

@@ -8,11 +8,10 @@ Tutorial `~adaptive.SKOptLearner`

.. seealso::
    The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`SKOptLearner`
+    :jupyter-download:notebook:`tutorial.SKOptLearner`

-.. execute::
+.. jupyter-execute::
    :hide-code:
-    :new-notebook: SKOptLearner

    import adaptive
    adaptive.notebook_extension()
@@ -33,13 +32,13 @@ Although ``SKOptLearner`` can optimize functions of arbitrary
dimensionality, we can only plot the learner if a 1D function is being
learned.

-.. execute::
+.. jupyter-execute::

    def F(x, noise_level=0.1):
        return (np.sin(5 * x) * (1 - np.tanh(x ** 2))
                + np.random.randn() * noise_level)

-.. execute::
+.. jupyter-execute::

    learner = adaptive.SKOptLearner(F, dimensions=[(-2., 2.)],
                                    base_estimator="GP",
@@ -48,16 +47,16 @@ learned.
                                   )
    runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40)

-.. execute::
+.. jupyter-execute::
    :hide-code:

    await runner.task  # This is not needed in a notebook environment!

-.. execute::
+.. jupyter-execute::

    runner.live_info()

-.. execute::
+.. jupyter-execute::

    %%opts Overlay [legend_position='top']
    xs = np.linspace(*learner.space.bounds[0])
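
The hunks above drive the learner with ``adaptive.Runner`` inside a live notebook (the ``await runner.task`` line only exists so the static docs build finishes the run before plotting). As a point of reference, a minimal blocking sketch of the same run, assuming ``adaptive.runner.simple`` and an installed ``scikit-optimize``, could look like:

.. code:: python

    import adaptive
    import numpy as np

    def F(x, noise_level=0.1):
        return (np.sin(5 * x) * (1 - np.tanh(x ** 2))
                + np.random.randn() * noise_level)

    learner = adaptive.SKOptLearner(F, dimensions=[(-2., 2.)],
                                    base_estimator="GP",
                                    acq_func="gp_hedge",
                                    acq_optimizer="lbfgs")

    # Run the learner synchronously (no event loop or notebook required)
    # until the same goal as in the tutorial is reached.
    adaptive.runner.simple(learner, goal=lambda l: l.npoints > 40)
    print(learner.npoints)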
new file mode 100644
@@ -0,0 +1,66 @@
+Tutorial `~adaptive.SKOptLearner`
+---------------------------------
+
+.. note::
+    Because this documentation consists of static html, the ``live_plot``
+    and ``live_info`` widgets are not live. Download the notebook
+    in order to see the real behaviour.
+
+.. seealso::
+    The complete source code of this tutorial can be found in
+    :jupyter-download:notebook:`SKOptLearner`
+
+.. execute::
+    :hide-code:
+    :new-notebook: SKOptLearner
+
+    import adaptive
+    adaptive.notebook_extension()
+
+    import holoviews as hv
+    import numpy as np
+
+We have wrapped the ``Optimizer`` class from
+`scikit-optimize <https://github.com/scikit-optimize/scikit-optimize>`__,
+to show how existing libraries can be integrated with ``adaptive``.
+
+The ``SKOptLearner`` attempts to “optimize” the given function ``g``
+(i.e. find the global minimum of ``g`` in the window of interest).
+
+Here we use the same example as in the ``scikit-optimize``
+`tutorial <https://github.com/scikit-optimize/scikit-optimize/blob/master/examples/ask-and-tell.ipynb>`__.
+Although ``SKOptLearner`` can optimize functions of arbitrary
+dimensionality, we can only plot the learner if a 1D function is being
+learned.
+
+.. execute::
+
+    def F(x, noise_level=0.1):
+        return (np.sin(5 * x) * (1 - np.tanh(x ** 2))
+                + np.random.randn() * noise_level)
+
+.. execute::
+
+    learner = adaptive.SKOptLearner(F, dimensions=[(-2., 2.)],
+                                    base_estimator="GP",
+                                    acq_func="gp_hedge",
+                                    acq_optimizer="lbfgs",
+                                   )
+    runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40)
+
+.. execute::
+    :hide-code:
+
+    await runner.task  # This is not needed in a notebook environment!
+
+.. execute::
+
+    runner.live_info()
+
+.. execute::
+
+    %%opts Overlay [legend_position='top']
+    xs = np.linspace(*learner.space.bounds[0])
+    to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label='to learn')
+
+    runner.live_plot().relabel('prediction', depth=2) * to_learn
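
For reference, the plain ``scikit-optimize`` ask-and-tell loop that ``SKOptLearner`` wraps (as in the linked ``scikit-optimize`` tutorial) looks roughly as follows; the ``Optimizer`` arguments mirror the ones used above, and ``Xi``/``yi`` are the observation lists that ``scikit-optimize`` keeps on the optimizer:

.. code:: python

    import numpy as np
    from skopt import Optimizer

    def F(x, noise_level=0.1):
        return (np.sin(5 * x) * (1 - np.tanh(x ** 2))
                + np.random.randn() * noise_level)

    opt = Optimizer(dimensions=[(-2., 2.)],
                    base_estimator="GP",
                    acq_func="gp_hedge",
                    acq_optimizer="lbfgs")

    for _ in range(40):
        x = opt.ask()      # next point to evaluate, as a list [x0]
        y = F(x[0])        # evaluate the (noisy) objective
        opt.tell(x, y)     # feed the observation back to the optimizer

    # Best observation seen so far.
    best = int(np.argmin(opt.yi))
    print("x =", opt.Xi[best], "F(x) =", opt.yi[best])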