... | ... |
@@ -10,8 +10,6 @@ Tutorial `~adaptive.BalancingLearner` |
10 | 10 |
The complete source code of this tutorial can be found in |
11 | 11 |
:jupyter-download:notebook:`tutorial.BalancingLearner` |
12 | 12 |
|
13 |
-.. thebe-button:: Run the code live inside the documentation! |
|
14 |
- |
|
15 | 13 |
.. jupyter-execute:: |
16 | 14 |
:hide-code: |
17 | 15 |
|
... | ... |
@@ -10,6 +10,8 @@ Tutorial `~adaptive.BalancingLearner` |
10 | 10 |
The complete source code of this tutorial can be found in |
11 | 11 |
:jupyter-download:notebook:`tutorial.BalancingLearner` |
12 | 12 |
|
13 |
+.. thebe-button:: Run the code live inside the documentation! |
|
14 |
+ |
|
13 | 15 |
.. jupyter-execute:: |
14 | 16 |
:hide-code: |
15 | 17 |
|
... | ... |
@@ -57,7 +57,7 @@ The balancing learner can for example be used to implement a poor-man’s |
57 | 57 |
|
58 | 58 |
Often one wants to create a set of ``learner``\ s for a Cartesian |
59 | 59 |
product of parameters. For that particular case we’ve added a |
60 |
-``classmethod`` called ``~adaptive.BalancingLearner.from_product``. |
|
60 |
+``classmethod`` called `~adaptive.BalancingLearner.from_product`. |
|
61 | 61 |
See how it works below. |
62 | 62 |
|
63 | 63 |
.. jupyter-execute:: |
... | ... |
@@ -8,11 +8,10 @@ Tutorial `~adaptive.BalancingLearner` |
8 | 8 |
|
9 | 9 |
.. seealso:: |
10 | 10 |
The complete source code of this tutorial can be found in |
11 |
- :jupyter-download:notebook:`BalancingLearner` |
|
11 |
+ :jupyter-download:notebook:`tutorial.BalancingLearner` |
|
12 | 12 |
|
13 |
-.. execute:: |
|
13 |
+.. jupyter-execute:: |
|
14 | 14 |
:hide-code: |
15 |
- :new-notebook: BalancingLearner |
|
16 | 15 |
|
17 | 16 |
import adaptive |
18 | 17 |
adaptive.notebook_extension() |
... | ... |
@@ -30,7 +29,7 @@ improvement. |
30 | 29 |
The balancing learner can for example be used to implement a poor-man’s |
31 | 30 |
2D learner by using the `~adaptive.Learner1D`. |
32 | 31 |
|
33 |
-.. execute:: |
|
32 |
+.. jupyter-execute:: |
|
34 | 33 |
|
35 | 34 |
def h(x, offset=0): |
36 | 35 |
a = 0.01 |
... | ... |
@@ -42,16 +41,16 @@ The balancing learner can for example be used to implement a poor-man’s |
42 | 41 |
bal_learner = adaptive.BalancingLearner(learners) |
43 | 42 |
runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01) |
44 | 43 |
|
45 |
-.. execute:: |
|
44 |
+.. jupyter-execute:: |
|
46 | 45 |
:hide-code: |
47 | 46 |
|
48 | 47 |
await runner.task # This is not needed in a notebook environment! |
49 | 48 |
|
50 |
-.. execute:: |
|
49 |
+.. jupyter-execute:: |
|
51 | 50 |
|
52 | 51 |
runner.live_info() |
53 | 52 |
|
54 |
-.. execute:: |
|
53 |
+.. jupyter-execute:: |
|
55 | 54 |
|
56 | 55 |
plotter = lambda learner: hv.Overlay([L.plot() for L in learner.learners]) |
57 | 56 |
runner.live_plot(plotter=plotter, update_interval=0.1) |
... | ... |
@@ -61,7 +60,7 @@ product of parameters. For that particular case we’ve added a |
61 | 60 |
``classmethod`` called ``~adaptive.BalancingLearner.from_product``. |
62 | 61 |
See how it works below |
63 | 62 |
|
64 |
-.. execute:: |
|
63 |
+.. jupyter-execute:: |
|
65 | 64 |
|
66 | 65 |
from scipy.special import eval_jacobi |
67 | 66 |
|
1 | 1 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,83 @@ |
1 |
+Tutorial `~adaptive.BalancingLearner` |
|
2 |
+------------------------------------- |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widgets are not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`BalancingLearner` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: BalancingLearner |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+ import holoviews as hv |
|
21 |
+ import numpy as np |
|
22 |
+ from functools import partial |
|
23 |
+ import random |
|
24 |
+ |
|
25 |
+The balancing learner is a “meta-learner” that takes a list of learners. |
|
26 |
+When you request a point from the balancing learner, it will query all |
|
27 |
+of its “children” to figure out which one will give the most |
|
28 |
+improvement. |
|
29 |
+ |
|
30 |
+The balancing learner can for example be used to implement a poor-man’s |
|
31 |
+2D learner by using the `~adaptive.Learner1D`. |
|
32 |
+ |
|
33 |
+.. execute:: |
|
34 |
+ |
|
35 |
+ def h(x, offset=0): |
|
36 |
+ a = 0.01 |
|
37 |
+ return x + a**2 / (a**2 + (x - offset)**2) |
|
38 |
+ |
|
39 |
+ learners = [adaptive.Learner1D(partial(h, offset=random.uniform(-1, 1)), |
|
40 |
+ bounds=(-1, 1)) for i in range(10)] |
|
41 |
+ |
|
42 |
+ bal_learner = adaptive.BalancingLearner(learners) |
|
43 |
+ runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01) |
|
44 |
+ |
|
45 |
+.. execute:: |
|
46 |
+ :hide-code: |
|
47 |
+ |
|
48 |
+ await runner.task # This is not needed in a notebook environment! |
|
49 |
+ |
|
50 |
+.. execute:: |
|
51 |
+ |
|
52 |
+ runner.live_info() |
|
53 |
+ |
|
54 |
+.. execute:: |
|
55 |
+ |
|
56 |
+ plotter = lambda learner: hv.Overlay([L.plot() for L in learner.learners]) |
|
57 |
+ runner.live_plot(plotter=plotter, update_interval=0.1) |
|
58 |
+ |
|
59 |
+Often one wants to create a set of ``learner``\ s for a Cartesian |
|
60 |
+product of parameters. For that particular case we’ve added a |
|
61 |
+``classmethod`` called ``~adaptive.BalancingLearner.from_product``. |
|
62 |
+See how it works below. |
|
63 |
+ |
|
64 |
+.. execute:: |
|
65 |
+ |
|
66 |
+ from scipy.special import eval_jacobi |
|
67 |
+ |
|
68 |
+ def jacobi(x, n, alpha, beta): return eval_jacobi(n, alpha, beta, x) |
|
69 |
+ |
|
70 |
+ combos = { |
|
71 |
+ 'n': [1, 2, 4, 8], |
|
72 |
+ 'alpha': np.linspace(0, 2, 3), |
|
73 |
+ 'beta': np.linspace(0, 1, 5), |
|
74 |
+ } |
|
75 |
+ |
|
76 |
+ learner = adaptive.BalancingLearner.from_product( |
|
77 |
+ jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos) |
|
78 |
+ |
|
79 |
+ runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) |
|
80 |
+ |
|
81 |
+ # The `cdims` will automatically be set when using `from_product`, so |
|
82 |
+ # `plot()` will return a HoloMap with correctly labeled sliders. |
|
83 |
+ learner.plot().overlay('beta').grid().select(y=(-1, 3)) |