...
@@ -36,7 +36,7 @@ Examples
 Here are some examples of how Adaptive samples vs. homogeneous sampling. Click
 on the *Play* :fa:`play` button or move the sliders.
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     import itertools
...
@@ -52,7 +52,7 @@ on the *Play* :fa:`play` button or move the sliders.
 `adaptive.Learner1D`
 ~~~~~~~~~~~~~~~~~~~~
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     %%opts Layout [toolbar=None]
...
@@ -87,7 +87,7 @@ on the *Play* :fa:`play` button or move the sliders.
 `adaptive.Learner2D`
 ~~~~~~~~~~~~~~~~~~~~
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     def ring(xy):
...
@@ -116,7 +116,7 @@ on the *Play* :fa:`play` button or move the sliders.
 `adaptive.AverageLearner`
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     def g(n):
...
@@ -8,11 +8,10 @@ Tutorial `~adaptive.AverageLearner`
 
 .. seealso::
     The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`AverageLearner`
+    :jupyter-download:notebook:`tutorial.AverageLearner`
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
-    :new-notebook: AverageLearner
 
     import adaptive
     adaptive.notebook_extension()
...
@@ -25,7 +24,7 @@ the learner must formally take a single parameter, which should be used
 like a “seed” for the (pseudo-) random variable (although in the current
 implementation the seed parameter can be ignored by the function).
 
-.. execute::
+.. jupyter-execute::
 
     def g(n):
        import random
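The viewer truncates this cell. Judging from the ``random.setstate(state)`` and
``return val`` context lines in the next hunk, the elided body seeds the RNG with
``n`` and restores the previous state afterwards; a sketch along those lines, not
necessarily the verbatim source::

    def g(n):
        import random
        from time import sleep

        sleep(random.random() / 1000)  # simulate some work
        state = random.getstate()      # remember the global RNG state
        random.seed(n)                 # use the argument as the seed
        val = random.gauss(0.5, 1)     # the noisy value to be averaged
        random.setstate(state)         # restore the global RNG state
        return val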
...
@@ -38,20 +37,20 @@ implementation the seed parameter can be ignored by the function).
        random.setstate(state)
        return val
 
-.. execute::
+.. jupyter-execute::
 
     learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)
     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 2)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_plot(update_interval=0.1)
...
@@ -8,11 +8,10 @@ Tutorial `~adaptive.BalancingLearner`
 
 .. seealso::
     The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`BalancingLearner`
+    :jupyter-download:notebook:`tutorial.BalancingLearner`
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
-    :new-notebook: BalancingLearner
 
     import adaptive
     adaptive.notebook_extension()
...
@@ -30,7 +29,7 @@ improvement.
 The balancing learner can for example be used to implement a poor-man’s
 2D learner by using the `~adaptive.Learner1D`.
 
-.. execute::
+.. jupyter-execute::
 
     def h(x, offset=0):
        a = 0.01
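This hunk is truncated by the viewer. Based on the peaked test function used
elsewhere in these tutorials, the elided remainder plausibly looks like the
following sketch (``partial`` and the random offsets are assumptions, not the
verbatim source)::

    from functools import partial
    import random

    def h(x, offset=0):
        a = 0.01
        return x + a**2 / (a**2 + (x - offset)**2)

    # One 1D learner per randomly shifted copy of the peak.
    learners = [adaptive.Learner1D(partial(h, offset=random.uniform(-1, 1)),
                                   bounds=(-1, 1)) for i in range(10)]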
...
@@ -42,16 +41,16 @@ The balancing learner can for example be used to implement a poor-man’s
     bal_learner = adaptive.BalancingLearner(learners)
     runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     plotter = lambda learner: hv.Overlay([L.plot() for L in learner.learners])
     runner.live_plot(plotter=plotter, update_interval=0.1)
...
@@ -61,7 +60,7 @@ product of parameters. For that particular case we’ve added a
 ``classmethod`` called `~adaptive.BalancingLearner.from_product`.
 See how it works below.
 
-.. execute::
+.. jupyter-execute::
 
     from scipy.special import eval_jacobi
 
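The rest of this cell is cut off by the viewer. A sketch of how
``from_product`` is typically used on the Jacobi-polynomial example that the
import suggests (the parameter combinations here are illustrative assumptions)::

    import numpy as np
    from scipy.special import eval_jacobi

    def jacobi(x, n, alpha, beta):
        return eval_jacobi(n, alpha, beta, x)

    # One learner per point in the Cartesian product of the named parameters.
    learner = adaptive.BalancingLearner.from_product(
        jacobi,
        adaptive.Learner1D,
        dict(bounds=(0, 1)),
        dict(n=[1, 2, 4], alpha=np.linspace(0, 2, 3), beta=np.linspace(0, 1, 5)),
    )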
...
@@ -8,11 +8,10 @@ Tutorial `~adaptive.DataSaver`
 
 .. seealso::
     The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`DataSaver`
+    :jupyter-download:notebook:`tutorial.DataSaver`
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
-    :new-notebook: DataSaver
 
     import adaptive
     adaptive.notebook_extension()
...
@@ -23,7 +22,7 @@ metadata, you can wrap your learner in an `adaptive.DataSaver`.
 In the following example the function to be learned returns its result
 and the execution time in a dictionary:
 
-.. execute::
+.. jupyter-execute::
 
     from operator import itemgetter
 
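The body of this cell is truncated. Given the description above and the
``itemgetter`` import, the wrapped learner is presumably set up along these
lines (a sketch; the function body is an assumption)::

    from operator import itemgetter

    def f_dict(x):
        """Return the result and the execution time in a dictionary."""
        import random
        from time import sleep, time

        t_start = time()
        sleep(random.random())  # pretend the computation is expensive
        a = 0.01
        y = x + a**2 / (a**2 + x**2)
        return {'y': y, 'elapsed_time': time() - t_start}

    _learner = adaptive.Learner1D(f_dict, bounds=(-1, 1))
    # 'arg_picker' tells the DataSaver which entry of the dict is the real data.
    learner = adaptive.DataSaver(_learner, arg_picker=itemgetter('y'))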
...
@@ -48,20 +47,20 @@ and the execution time in a dictionary:
 ``learner.learner`` is the original learner, so
 ``learner.learner.loss()`` will call the correct loss method.
 
-.. execute::
+.. jupyter-execute::
 
     runner = adaptive.Runner(learner, goal=lambda l: l.learner.loss() < 0.1)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_plot(plotter=lambda l: l.learner.plot(), update_interval=0.1)
 
...
@@ -69,6 +68,6 @@ Now the ``DataSavingLearner`` will have a dictionary attribute
 ``extra_data`` that has ``x`` as key and the data that was returned by
 ``learner.function`` as values.
 
-.. execute::
+.. jupyter-execute::
 
     learner.extra_data
...
@@ -8,11 +8,10 @@ Tutorial `~adaptive.IntegratorLearner`
 
 .. seealso::
     The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`IntegratorLearner`
+    :jupyter-download:notebook:`tutorial.IntegratorLearner`
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
-    :new-notebook: IntegratorLearner
 
     import adaptive
     adaptive.notebook_extension()
...
@@ -27,7 +26,7 @@ of the integral with it. It is based on Pedro Gonnet’s
 Let’s try the following function with cusps (that is difficult to
 integrate):
 
-.. execute::
+.. jupyter-execute::
 
     def f24(x):
        return np.floor(np.exp(x))
...
@@ -40,7 +39,7 @@ let’s try a familiar function integrator `scipy.integrate.quad`, which
 will give us warnings that it encounters difficulties (if we run it
 in a notebook).
 
-.. execute::
+.. jupyter-execute::
 
     import scipy.integrate
     scipy.integrate.quad(f24, 0, 3)
...
@@ -50,7 +49,7 @@ we want to reach. Then in the `~adaptive.Runner` we pass
 ``goal=lambda l: l.done()`` where ``learner.done()`` is ``True`` when
 the relative tolerance has been reached.
 
-.. execute::
+.. jupyter-execute::
 
     from adaptive.runner import SequentialExecutor
 
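The learner construction itself is elided between this hunk and the next one,
which already references ``learner`` and ``l.done()``. It presumably looks
something like this sketch (the exact ``tol`` value is an assumption)::

    learner = adaptive.IntegratorLearner(f24, bounds=(0, 3), tol=1e-8)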
...
@@ -61,24 +60,24 @@ the relative tolerance has been reached.
     # the overhead of evaluating the function in another process.
     runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.done())
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
 Now we could do the live plotting again, but let’s just wait until the
 runner is done.
 
-.. execute::
+.. jupyter-execute::
 
     if not runner.task.done():
         raise RuntimeError('Wait for the runner to finish before executing the cells below!')
 
-.. execute::
+.. jupyter-execute::
 
     print('The integral value is {} with the corresponding error of {}'.format(learner.igral, learner.err))
     learner.plot()
...
@@ -8,11 +8,10 @@ Tutorial `~adaptive.Learner1D`
 
 .. seealso::
     The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`Learner1D`
+    :jupyter-download:notebook:`tutorial.Learner1D`
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
-    :new-notebook: Learner1D
 
     import adaptive
     adaptive.notebook_extension()
...
@@ -30,7 +29,7 @@ We start with the most common use-case: sampling a 1D function
 We will use the following function, which is a smooth (linear)
 background with a sharp peak at a random location:
 
-.. execute::
+.. jupyter-execute::
 
     offset = random.uniform(-0.5, 0.5)
 
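The peaked test function itself is cut off here. Matching the description of a
linear background with a sharp peak at ``offset``, it is presumably the
Lorentzian-style function used throughout these tutorials (a sketch, not the
verbatim source)::

    def f(x, offset=offset, wait=True):
        from time import sleep
        from random import random

        a = 0.01
        if wait:
            sleep(random())  # simulate an expensive evaluation
        return x + a**2 / (a**2 + (x - offset)**2)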
...
@@ -47,7 +46,7 @@ We start by initializing a 1D “learner”, which will suggest points to
 evaluate, and adapt its suggestions as more and more points are
 evaluated.
 
-.. execute::
+.. jupyter-execute::
 
     learner = adaptive.Learner1D(f, bounds=(-1, 1))
 
...
@@ -61,13 +60,13 @@ On Windows systems the runner will try to use a `distributed.Client`
 if `distributed` is installed. A `~concurrent.futures.ProcessPoolExecutor`
 cannot be used on Windows, where the lack of ``fork`` makes interactively defined functions unpicklable.
 
-.. execute::
+.. jupyter-execute::
 
     # The end condition is when the "loss" is less than 0.01. In the context of the
     # 1D learner this means that we will resolve features in 'func' with width 0.01 or wider.
     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
...
@@ -76,23 +75,23 @@ When instantiated in a Jupyter notebook the runner does its job in the
 background and does not block the IPython kernel. We can use this to
 create a plot that updates as new data arrives:
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_plot(update_interval=0.1)
 
 We can now compare the adaptive sampling to a homogeneous sampling with
 the same number of points:
 
-.. execute::
+.. jupyter-execute::
 
     if not runner.task.done():
         raise RuntimeError('Wait for the runner to finish before executing the cells below!')
 
-.. execute::
+.. jupyter-execute::
 
     learner2 = adaptive.Learner1D(f, bounds=learner.bounds)
 
...
@@ -107,7 +106,7 @@ vector output: ``f:ℝ → ℝ^N``
 
 Sometimes you may want to learn a function with vector output:
 
-.. execute::
+.. jupyter-execute::
 
     random.seed(0)
     offsets = [random.uniform(-0.8, 0.8) for _ in range(3)]
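The definition of ``f_levels`` (used in the next hunk) is elided here. Given the
offsets above, it plausibly stacks several shifted copies of the peaked 1D
function into one array — a sketch, not the verbatim source::

    import numpy as np

    def f_levels(x, offsets=offsets):
        a = 0.01
        return np.array([offset + x + a**2 / (a**2 + (x - offset)**2)
                         for offset in offsets])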
...
@@ -121,20 +120,20 @@ Sometimes you may want to learn a function with vector output:
 ``adaptive`` has you covered! The ``Learner1D`` can be used for such
 functions:
 
-.. execute::
+.. jupyter-execute::
 
     learner = adaptive.Learner1D(f_levels, bounds=(-1, 1))
     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_plot(update_interval=0.1)
...
@@ -8,11 +8,10 @@ Tutorial `~adaptive.Learner2D`
 
 .. seealso::
     The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`Learner2D`
+    :jupyter-download:notebook:`tutorial.Learner2D`
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
-    :new-notebook: Learner2D
 
     import adaptive
     adaptive.notebook_extension()
...
@@ -23,7 +22,7 @@ Tutorial `~adaptive.Learner2D`
 Besides 1D functions, we can also learn 2D functions:
 :math:`\ f: ℝ^2 → ℝ`.
 
-.. execute::
+.. jupyter-execute::
 
     def ring(xy, wait=True):
        import numpy as np
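The rest of ``ring`` is cut off; the next hunk already passes it to
``adaptive.Learner2D``. A sketch of a ring-shaped test function consistent with
the name (the exact constants are assumptions)::

    def ring(xy, wait=True):
        import numpy as np
        from time import sleep
        from random import random

        if wait:
            sleep(random() / 10)  # simulate an expensive evaluation
        x, y = xy
        a = 0.2
        return x + np.exp(-(x**2 + y**2 - 0.75**2)**2 / a**4)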
...
@@ -37,20 +36,20 @@ Besides 1D functions, we can also learn 2D functions:
 
     learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
 
-.. execute::
+.. jupyter-execute::
 
     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     def plot(learner):
        plot = learner.plot(tri_alpha=0.2)
...
@@ -58,7 +57,7 @@ Besides 1D functions, we can also learn 2D functions:
 
     runner.live_plot(plotter=plot, update_interval=0.1)
 
-.. execute::
+.. jupyter-execute::
 
     %%opts EdgePaths (color='w')
 
...
@@ -8,11 +8,10 @@ Tutorial `~adaptive.LearnerND`
 
 .. seealso::
     The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`LearnerND`
+    :jupyter-download:notebook:`tutorial.LearnerND`
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
-    :new-notebook: LearnerND
 
     import adaptive
     adaptive.notebook_extension()
...
@@ -33,7 +32,7 @@ Do keep in mind that the speed and
 `effectiveness <https://en.wikipedia.org/wiki/Curse_of_dimensionality>`__
 of the learner drop quickly with an increasing number of dimensions.
 
-.. execute::
+.. jupyter-execute::
 
     # This step takes a lot of time; it will finish at about 3300 points, which can take up to 6 minutes.
     def sphere(xyz):
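``sphere`` is truncated here. A smooth 3D test function of the kind these
tutorials use would look roughly like this sketch (the constants are
assumptions)::

    def sphere(xyz):
        import numpy as np

        x, y, z = xyz
        a = 0.4
        return x + z**2 + np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2 / a**4)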
...
@@ -44,18 +43,18 @@ of the learner drop quickly with an increasing number of dimensions.
     learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])
     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
 Let’s plot 2D slices of the 3D function.
 
-.. execute::
+.. jupyter-execute::
 
     def plot_cut(x, direction, learner=learner):
        cut_mapping = {'XYZ'.index(direction): x}
...
@@ -70,7 +69,7 @@ Let’s plot 2D slices of the 3D function.
 
 Or we can plot 1D slices:
 
-.. execute::
+.. jupyter-execute::
 
     %%opts Path {+framewise}
     def plot_cut(x1, x2, directions, learner=learner):
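The body of this 1D-slice helper is elided. It presumably fixes two of the
three coordinates and calls ``plot_slice``, along these lines (a sketch under
that assumption)::

    def plot_cut(x1, x2, directions, learner=learner):
        # Fix two coordinates, e.g. directions='xy' fixes x to x1 and y to x2.
        cut_mapping = {'xyz'.index(d): x
                       for d, x in zip(directions, [x1, x2])}
        return learner.plot_slice(cut_mapping)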
...
@@ -8,11 +8,10 @@ Tutorial `~adaptive.SKOptLearner`
 
 .. seealso::
     The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`SKOptLearner`
+    :jupyter-download:notebook:`tutorial.SKOptLearner`
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
-    :new-notebook: SKOptLearner
 
     import adaptive
     adaptive.notebook_extension()
...
@@ -33,13 +32,13 @@ Although ``SKOptLearner`` can optimize functions of arbitrary
 dimensionality, we can only plot the learner if a 1D function is being
 learned.
 
-.. execute::
+.. jupyter-execute::
 
     def F(x, noise_level=0.1):
        return (np.sin(5 * x) * (1 - np.tanh(x ** 2))
               + np.random.randn() * noise_level)
 
-.. execute::
+.. jupyter-execute::
 
     learner = adaptive.SKOptLearner(F, dimensions=[(-2., 2.)],
                                    base_estimator="GP",
...
@@ -48,16 +47,16 @@ learned.
     )
     runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     %%opts Overlay [legend_position='top']
     xs = np.linspace(*learner.space.bounds[0])
...
@@ -8,11 +8,10 @@ Advanced Topics
 
 .. seealso::
     The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`advanced-topics`
+    :jupyter-download:notebook:`tutorial.advanced-topics`
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
-    :new-notebook: advanced-topics
 
     import adaptive
     adaptive.notebook_extension()
...
@@ -45,7 +44,7 @@ The second way *must be used* when saving the ``learner``\s of a
 By default the resulting pickle files are compressed; to turn this off
 use ``learner.save(fname=..., compress=False)``.
 
-.. execute::
+.. jupyter-execute::
 
     # Let's create two learners and run only one.
     learner = adaptive.Learner1D(f, bounds=(-1, 1))
...
@@ -54,16 +53,16 @@ use ``learner.save(fname=..., compress=False)``
     # Let's only run the learner
     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     fname = 'data/example_file.p'
     learner.save(fname)
...
@@ -74,7 +73,7 @@ use ``learner.save(fname=..., compress=False)``
 
 Or just (without saving):
 
-.. execute::
+.. jupyter-execute::
 
     control = adaptive.Learner1D(f, bounds=(-1, 1))
     control.copy_from(learner)
...
@@ -82,7 +81,7 @@ Or just (without saving):
 One can also periodically save the learner while running in a
 `~adaptive.Runner`. Use it like:
 
-.. execute::
+.. jupyter-execute::
 
     def slow_f(x):
        from time import sleep
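``slow_f`` is truncated; the next hunk already creates a runner for a
``learner`` built from it. The elided lines are presumably as simple as this
sketch::

    def slow_f(x):
        from time import sleep
        sleep(1)  # make each evaluation take a while
        return x

    learner = adaptive.Learner1D(slow_f, bounds=[0, 1])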
...
@@ -93,17 +92,17 @@ One can also periodically save the learner while running in a
     runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100)
     runner.start_periodic_saving(save_kwargs=dict(fname='data/periodic_example.p'), interval=6)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await asyncio.sleep(6)  # This is not needed in a notebook environment!
     runner.cancel()
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()  # we cancelled it after 6 seconds
 
-.. execute::
+.. jupyter-execute::
 
     # See the data that was saved after 6 seconds with
     !ls -lah data  # only works on macOS and Linux systems
...
@@ -137,7 +136,7 @@ something until it’s done?
 The simplest way to accomplish this is to use
 `adaptive.BlockingRunner`:
 
-.. execute::
+.. jupyter-execute::
 
     learner = adaptive.Learner1D(f, bounds=(-1, 1))
     adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)
...
@@ -164,7 +163,7 @@ way with adaptive.
 The simplest way is to use `adaptive.runner.simple` to run your
 learner:
 
-.. execute::
+.. jupyter-execute::
 
     learner = adaptive.Learner1D(f, bounds=(-1, 1))
 
...
@@ -180,7 +179,7 @@ If you want to enable determinism but want to continue using the
 non-blocking `adaptive.Runner`, you can use the
 `adaptive.runner.SequentialExecutor`:
 
-.. execute::
+.. jupyter-execute::
 
     from adaptive.runner import SequentialExecutor
 
...
@@ -188,16 +187,16 @@ non-blocking `adaptive.Runner`, you can use the
 
     runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.01)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_plot(update_interval=0.1)
 
...
@@ -215,29 +214,29 @@ cancelled.
 the runner. You can also stop the runner programmatically using
 ``runner.cancel()``.
 
-.. execute::
+.. jupyter-execute::
 
     learner = adaptive.Learner1D(f, bounds=(-1, 1))
     runner = adaptive.Runner(learner)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await asyncio.sleep(0.1)  # This is not needed in the notebook!
 
-.. execute::
+.. jupyter-execute::
 
     runner.cancel()  # Let's execute this after 0.1 seconds
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_plot(update_interval=0.1)
 
-.. execute::
+.. jupyter-execute::
 
     print(runner.status())
 
...
@@ -253,7 +252,7 @@ gone wrong is that nothing will be happening.
 Let’s look at the following example, where the function to be learned
 will raise an exception 10% of the time.
 
-.. execute::
+.. jupyter-execute::
 
     def will_raise(x):
        from random import random
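``will_raise`` is cut off here. A function that fails 10% of the time, as the
text describes, could be as simple as this sketch::

    def will_raise(x):
        from random import random
        from time import sleep

        sleep(random())
        if random() < 0.1:
            raise Exception('something went wrong!')
        return x**2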
...
@@ -268,16 +267,16 @@ will raise an exception 10% of the time.
     runner = adaptive.Runner(learner)  # without 'goal' the runner will run forever unless cancelled
 
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await asyncio.sleep(4)  # in 4 seconds it will surely have failed
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_plot()
 
...
@@ -286,7 +285,7 @@ after a few points are evaluated.
 
 First we should check that the runner has really finished:
 
-.. execute::
+.. jupyter-execute::
 
     runner.task.done()
 
...
@@ -295,7 +294,7 @@ runner. This should be ``None`` if the runner stopped successfully. If
 the runner stopped due to an exception then asking for the result will
 raise the exception with the stack trace:
 
-.. execute::
+.. jupyter-execute::
 
     runner.task.result()
 
...
@@ -306,18 +305,18 @@ Runners do their job in the background, which makes introspection quite
 cumbersome. One way to inspect runners is to instantiate one with
 ``log=True``:
 
-.. execute::
+.. jupyter-execute::
 
     learner = adaptive.Learner1D(f, bounds=(-1, 1))
-    runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1,
+    runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01,
                              log=True)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
...
@@ -329,12 +328,12 @@ non-deterministic order.
 This can be used with `adaptive.runner.replay_log` to perform the same
 set of operations on another runner:
 
-.. execute::
+.. jupyter-execute::
 
     reconstructed_learner = adaptive.Learner1D(f, bounds=learner.bounds)
     adaptive.runner.replay_log(reconstructed_learner, runner.log)
 
-.. execute::
+.. jupyter-execute::
 
     learner.plot().Scatter.I.opts(style=dict(size=6)) * reconstructed_learner.plot()
 
...
@@ -356,7 +355,7 @@ not do anything when the kernel is blocked.
 Therefore you need to create an ``async`` function and hook it into the
 ``ioloop`` like so:
 
-.. execute::
+.. jupyter-execute::
 
     import asyncio
 
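The coroutine itself is elided; the next hunk shows only
``timer = ioloop.create_task(time(runner))``. Pieced together from that line,
the cell plausibly reads as follows (a sketch, not the verbatim source)::

    import asyncio

    async def time(runner):
        from datetime import datetime

        now = datetime.now()
        await runner.task  # wait until the runner is done
        return datetime.now() - now

    ioloop = asyncio.get_event_loop()

    learner = adaptive.Learner1D(f, bounds=(-1, 1))
    runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)

    timer = ioloop.create_task(time(runner))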
...
@@ -373,12 +372,12 @@ Therefore you need to create an ``async`` function and hook it into the
 
     timer = ioloop.create_task(time(runner))
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     # The result will only be set when the runner is done.
     timer.result()
...
@@ -8,11 +8,10 @@ Custom adaptive logic for 1D and 2D
 
 .. seealso::
     The complete source code of this tutorial can be found in
-    :jupyter-download:notebook:`custom-loss-function`
+    :jupyter-download:notebook:`tutorial.custom-loss-function`
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
-    :new-notebook: custom-loss-function
 
     import adaptive
     adaptive.notebook_extension()
...
@@ -59,7 +58,7 @@ Uniform sampling
 Say we want to properly sample a function that contains divergences. A
 simple (but naive) strategy is to *uniformly* sample the domain:
 
-.. execute::
+.. jupyter-execute::
 
     def uniform_sampling_1d(interval, scale, function_values):
        # Note that we never use 'function_values'; the loss is just the size of the subdomain
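The rest of this custom-loss cell is truncated. A uniform loss just returns the
(scaled) width of the interval; a sketch consistent with the signature above
(``f_divergent_1d`` is an assumed example function)::

    def uniform_sampling_1d(interval, scale, function_values):
        # Note that we never use 'function_values'; the loss is just the size of the subdomain
        x_left, x_right = interval
        x_scale, _ = scale
        dx = (x_right - x_left) / x_scale
        return dx

    def f_divergent_1d(x):
        return 1 / x**2

    learner = adaptive.Learner1D(f_divergent_1d, (-1, 1),
                                 loss_per_interval=uniform_sampling_1d)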
...
@@ -75,7 +74,7 @@ simple (but naive) strategy is to *uniformly* sample the domain:
     runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)
     learner.plot().select(y=(0, 10000))
 
-.. execute::
+.. jupyter-execute::
 
     %%opts EdgePaths (color='w') Image [logz=True colorbar=True]
 
...
@@ -95,16 +94,16 @@ simple (but naive) strategy is to *uniformly* sample the domain:
     # this takes a while, so use the async Runner so we know *something* is happening
     runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.02)
 
-.. execute::
+.. jupyter-execute::
     :hide-code:
 
     await runner.task  # This is not needed in a notebook environment!
 
-.. execute::
+.. jupyter-execute::
 
     runner.live_info()
 
-.. execute::
+.. jupyter-execute::
 
     plotter = lambda l: l.plot(tri_alpha=0.3).relabel(
        '1 / (x^2 + y^2) in log scale')
...
@@ -132,7 +131,7 @@ subdomains are appropriately small it will prioritise places where the
 function is very nonlinear, but will ignore subdomains that are too
 small (0 loss).
 
-.. execute::
+.. jupyter-execute::
 
     %%opts EdgePaths (color='w') Image [logz=True colorbar=True]
 