... | ... |
@@ -20,7 +20,7 @@ def notebook_extension(): |
20 | 20 |
try: |
21 | 21 |
import ipywidgets |
22 | 22 |
import holoviews |
23 |
- holoviews.notebook_extension('bokeh') |
|
23 |
+ holoviews.notebook_extension('bokeh', logo=False) |
|
24 | 24 |
_plotting_enabled = True |
25 | 25 |
except ModuleNotFoundError: |
26 | 26 |
warnings.warn("holoviews and (or) ipywidgets are not installed; plotting " |
... | ... |
@@ -264,7 +264,7 @@ class BlockingRunner(BaseRunner): |
264 | 264 |
of cores available in `executor`. |
265 | 265 |
log : bool, default: False |
266 | 266 |
If True, record the method calls made to the learner by this runner. |
267 |
- shutdown_executor : Bool, default: False |
|
267 |
+ shutdown_executor : bool, default: False |
|
268 | 268 |
If True, shutdown the executor when the runner has completed. If |
269 | 269 |
`executor` is not provided then the executor created internally |
270 | 270 |
by the runner is shut down, regardless of this parameter. |
... | ... |
@@ -367,7 +367,7 @@ class AsyncRunner(BaseRunner): |
367 | 367 |
of cores available in `executor`. |
368 | 368 |
log : bool, default: False |
369 | 369 |
If True, record the method calls made to the learner by this runner. |
370 |
- shutdown_executor : Bool, default: False |
|
370 |
+ shutdown_executor : bool, default: False |
|
371 | 371 |
If True, shutdown the executor when the runner has completed. If |
372 | 372 |
`executor` is not provided then the executor created internally |
373 | 373 |
by the runner is shut down, regardless of this parameter. |
... | ... |
@@ -47,6 +47,7 @@ extensions = [ |
47 | 47 |
'sphinx.ext.mathjax', |
48 | 48 |
'sphinx.ext.viewcode', |
49 | 49 |
'sphinx.ext.napoleon', |
50 |
+ 'jupyter_sphinx.execute', |
|
50 | 51 |
] |
51 | 52 |
|
52 | 53 |
source_parsers = {} |
... | ... |
@@ -113,13 +114,52 @@ html_static_path = ['_static'] |
113 | 114 |
# Output file base name for HTML help builder. |
114 | 115 |
htmlhelp_basename = 'adaptivedoc' |
115 | 116 |
|
117 |
+ |
|
116 | 118 |
# -- Extension configuration ------------------------------------------------- |
117 | 119 |
|
118 | 120 |
default_role = 'autolink' |
119 | 121 |
|
120 |
-intersphinx_mapping = {'python': ('https://docs.python.org/3', None), |
|
121 |
- 'distributed': ('https://distributed.readthedocs.io/en/stable/', None), |
|
122 |
- 'holoviews': ('https://holoviews.org/', None), |
|
123 |
- 'ipyparallel': ('https://ipyparallel.readthedocs.io/en/stable/', None), |
|
124 |
- 'scipy': ('https://docs.scipy.org/doc/scipy/reference', None), |
|
122 |
+intersphinx_mapping = { |
|
123 |
+ 'python': ('https://docs.python.org/3', None), |
|
124 |
+ 'distributed': ('https://distributed.readthedocs.io/en/stable/', None), |
|
125 |
+ 'holoviews': ('https://holoviews.org/', None), |
|
126 |
+ 'ipyparallel': ('https://ipyparallel.readthedocs.io/en/stable/', None), |
|
127 |
+ 'scipy': ('https://docs.scipy.org/doc/scipy/reference', None), |
|
125 | 128 |
} |
129 |
+ |
|
130 |
+ |
|
131 |
+# -- Add Holoviews js and css ------------------------------------------------ |
|
132 |
+ |
|
133 |
+def setup(app): |
|
134 |
+ from holoviews.plotting import Renderer |
|
135 |
+ |
|
136 |
+ hv_js, hv_css = Renderer.html_assets( |
|
137 |
+ extras=False, backends=[], script=True) |
|
138 |
+ |
|
139 |
+ fname_css = 'holoviews.css' |
|
140 |
+ fname_js = 'holoviews.js' |
|
141 |
+ static_dir = 'source/_static' |
|
142 |
+ |
|
143 |
+ os.makedirs(static_dir, exist_ok=True) |
|
144 |
+ |
|
145 |
+ with open(f'{static_dir}/{fname_css}', 'w') as f: |
|
146 |
+ hv_css = hv_css.split('<style>')[1].replace('</style>', '') |
|
147 |
+ f.write(hv_css) |
|
148 |
+ |
|
149 |
+ with open(f'{static_dir}/{fname_js}', 'w') as f: |
|
150 |
+ f.write(hv_js) |
|
151 |
+ |
|
152 |
+ app.add_stylesheet(fname_css) |
|
153 |
+ app.add_javascript(fname_js) |
|
154 |
+ |
|
155 |
+ dependencies = {**Renderer.core_dependencies, **Renderer.extra_dependencies} |
|
156 |
+ for name, type_url in dependencies.items(): |
|
157 |
+ if name in ['bootstrap']: |
|
158 |
+ continue |
|
159 |
+ |
|
160 |
+ for url in type_url.get('js', []): |
|
161 |
+ app.add_javascript(url) |
|
162 |
+ for url in type_url.get('css', []): |
|
163 |
+ app.add_stylesheet(url) |
|
164 |
+ |
|
165 |
+ app.add_javascript("https://unpkg.com/@jupyter-widgets/html-manager@0.14.4/dist/embed-amd.js") |
28 | 29 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,18 @@ |
1 |
+adaptive.runner.simple |
|
2 |
+====================== |
|
3 |
+ |
|
4 |
+Simple executor |
|
5 |
+--------------- |
|
6 |
+ |
|
7 |
+.. autofunction:: adaptive.runner.simple |
|
8 |
+ |
|
9 |
+Sequential executor |
|
10 |
+-------------------- |
|
11 |
+ |
|
12 |
+.. autofunction:: adaptive.runner.SequentialExecutor |
|
13 |
+ |
|
14 |
+ |
|
15 |
+Replay log |
|
16 |
+---------- |
|
17 |
+ |
|
18 |
+.. autofunction:: adaptive.runner.replay_log |
0 | 19 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,57 @@ |
1 |
+Tutorial `~adaptive.AverageLearner` |
|
2 |
+----------------------------------- |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widgets are not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`AverageLearner` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: AverageLearner |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+The next type of learner averages a function until the uncertainty in |
|
21 |
+the average meets some condition. |
|
22 |
+ |
|
23 |
+This is useful for sampling a random variable. The function passed to |
|
24 |
+the learner must formally take a single parameter, which should be used |
|
25 |
+like a “seed” for the (pseudo-) random variable (although in the current |
|
26 |
+implementation the seed parameter can be ignored by the function). |
|
27 |
+ |
|
28 |
+.. execute:: |
|
29 |
+ |
|
30 |
+ def g(n): |
|
31 |
+ import random |
|
32 |
+ from time import sleep |
|
33 |
+ sleep(random.random() / 1000) |
|
34 |
+ # Properly save and restore the RNG state |
|
35 |
+ state = random.getstate() |
|
36 |
+ random.seed(n) |
|
37 |
+ val = random.gauss(0.5, 1) |
|
38 |
+ random.setstate(state) |
|
39 |
+ return val |
|
40 |
+ |
|
41 |
+.. execute:: |
|
42 |
+ |
|
43 |
+ learner = adaptive.AverageLearner(g, atol=None, rtol=0.01) |
|
44 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 2) |
|
45 |
+ |
|
46 |
+.. execute:: |
|
47 |
+ :hide-code: |
|
48 |
+ |
|
49 |
+ await runner.task # This is not needed in a notebook environment! |
|
50 |
+ |
|
51 |
+.. execute:: |
|
52 |
+ |
|
53 |
+ runner.live_info() |
|
54 |
+ |
|
55 |
+.. execute:: |
|
56 |
+ |
|
57 |
+ runner.live_plot(update_interval=0.1) |
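+
+When the runner has finished, the estimated mean and its standard
+deviation can be read off the learner directly (a minimal sketch; it
+assumes the ``mean`` and ``std`` attributes of the
+`~adaptive.AverageLearner`):
+
+.. execute::
+
+    learner.mean, learner.std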
0 | 58 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,83 @@ |
1 |
+Tutorial `~adaptive.BalancingLearner` |
|
2 |
+------------------------------------- |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widgets are not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`BalancingLearner` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: BalancingLearner |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+ import holoviews as hv |
|
21 |
+ import numpy as np |
|
22 |
+ from functools import partial |
|
23 |
+ import random |
|
24 |
+ |
|
25 |
+The balancing learner is a “meta-learner” that takes a list of learners. |
|
26 |
+When you request a point from the balancing learner, it will query all |
|
27 |
+of its “children” to figure out which one will give the most |
|
28 |
+improvement. |
|
29 |
+ |
|
30 |
+The balancing learner can for example be used to implement a poor-man’s |
|
31 |
+2D learner by using the `~adaptive.Learner1D`. |
|
32 |
+ |
|
33 |
+.. execute:: |
|
34 |
+ |
|
35 |
+ def h(x, offset=0): |
|
36 |
+ a = 0.01 |
|
37 |
+ return x + a**2 / (a**2 + (x - offset)**2) |
|
38 |
+ |
|
39 |
+ learners = [adaptive.Learner1D(partial(h, offset=random.uniform(-1, 1)), |
|
40 |
+ bounds=(-1, 1)) for i in range(10)] |
|
41 |
+ |
|
42 |
+ bal_learner = adaptive.BalancingLearner(learners) |
|
43 |
+ runner = adaptive.Runner(bal_learner, goal=lambda l: l.loss() < 0.01) |
|
44 |
+ |
|
45 |
+.. execute:: |
|
46 |
+ :hide-code: |
|
47 |
+ |
|
48 |
+ await runner.task # This is not needed in a notebook environment! |
|
49 |
+ |
|
50 |
+.. execute:: |
|
51 |
+ |
|
52 |
+ runner.live_info() |
|
53 |
+ |
|
54 |
+.. execute:: |
|
55 |
+ |
|
56 |
+ plotter = lambda learner: hv.Overlay([L.plot() for L in learner.learners]) |
|
57 |
+ runner.live_plot(plotter=plotter, update_interval=0.1) |
|
58 |
+ |
|
59 |
+Often one wants to create a set of learners for a Cartesian |
|
60 |
+product of parameters. For that particular case we’ve added a |
|
61 |
+``classmethod`` called `~adaptive.BalancingLearner.from_product`. |
|
62 |
+See how it works below. |
|
63 |
+ |
|
64 |
+.. execute:: |
|
65 |
+ |
|
66 |
+ from scipy.special import eval_jacobi |
|
67 |
+ |
|
68 |
+ def jacobi(x, n, alpha, beta): return eval_jacobi(n, alpha, beta, x) |
|
69 |
+ |
|
70 |
+ combos = { |
|
71 |
+ 'n': [1, 2, 4, 8], |
|
72 |
+ 'alpha': np.linspace(0, 2, 3), |
|
73 |
+ 'beta': np.linspace(0, 1, 5), |
|
74 |
+ } |
|
75 |
+ |
|
76 |
+ learner = adaptive.BalancingLearner.from_product( |
|
77 |
+ jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos) |
|
78 |
+ |
|
79 |
+ runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) |
|
80 |
+ |
|
81 |
+ # The `cdims` will automatically be set when using `from_product`, so |
|
82 |
+ # `plot()` will return a HoloMap with correctly labeled sliders. |
|
83 |
+ learner.plot().overlay('beta').grid().select(y=(-1, 3)) |
0 | 84 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,74 @@ |
1 |
+Tutorial `~adaptive.DataSaver` |
|
2 |
+------------------------------ |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widgets are not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`DataSaver` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: DataSaver |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+If the function that you want to learn returns a value along with some |
|
21 |
+metadata, you can wrap your learner in an `adaptive.DataSaver`. |
|
22 |
+ |
|
23 |
+In the following example the function to be learned returns its result |
|
24 |
+and the execution time in a dictionary: |
|
25 |
+ |
|
26 |
+.. execute:: |
|
27 |
+ |
|
28 |
+ from operator import itemgetter |
|
29 |
+ |
|
30 |
+ def f_dict(x): |
|
31 |
+ """The function evaluation takes roughly the time we `sleep`.""" |
|
32 |
+ import random |
|
33 |
+ from time import sleep |
|
34 |
+ |
|
35 |
+ waiting_time = random.random() |
|
36 |
+ sleep(waiting_time) |
|
37 |
+ a = 0.01 |
|
38 |
+ y = x + a**2 / (a**2 + x**2) |
|
39 |
+ return {'y': y, 'waiting_time': waiting_time} |
|
40 |
+ |
|
41 |
+ # Create the learner with the function that returns a 'dict' |
|
42 |
+ # This learner cannot be run directly, as Learner1D does not know what to do with the 'dict' |
|
43 |
+ _learner = adaptive.Learner1D(f_dict, bounds=(-1, 1)) |
|
44 |
+ |
|
45 |
+ # Wrap the learner in 'adaptive.DataSaver' and tell it which key it needs to learn |
|
46 |
+ learner = adaptive.DataSaver(_learner, arg_picker=itemgetter('y')) |
|
47 |
+ |
|
48 |
+``learner.learner`` is the original learner, so |
|
49 |
+``learner.learner.loss()`` will call the correct loss method. |
|
50 |
+ |
|
51 |
+.. execute:: |
|
52 |
+ |
|
53 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.learner.loss() < 0.1) |
|
54 |
+ |
|
55 |
+.. execute:: |
|
56 |
+ :hide-code: |
|
57 |
+ |
|
58 |
+ await runner.task # This is not needed in a notebook environment! |
|
59 |
+ |
|
60 |
+.. execute:: |
|
61 |
+ |
|
62 |
+ runner.live_info() |
|
63 |
+ |
|
64 |
+.. execute:: |
|
65 |
+ |
|
66 |
+ runner.live_plot(plotter=lambda l: l.learner.plot(), update_interval=0.1) |
|
67 |
+ |
|
68 |
+Now the ``DataSaver`` will have a dictionary attribute |
|
69 |
+``extra_data`` with the ``x`` values as keys and the data returned by |
|
70 |
+``learner.function`` as values. |
|
71 |
+ |
|
72 |
+.. execute:: |
|
73 |
+ |
|
74 |
+ learner.extra_data |
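+
+For example, the recorded waiting times can be plotted against ``x``
+(a minimal sketch; it assumes ``holoviews`` is importable, which is the
+case whenever the notebook extension works):
+
+.. execute::
+
+    import holoviews as hv
+
+    xs, extras = zip(*sorted(learner.extra_data.items()))
+    hv.Scatter((xs, [d['waiting_time'] for d in extras])).relabel('waiting time per point')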
0 | 75 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,84 @@ |
1 |
+Tutorial `~adaptive.IntegratorLearner` |
|
2 |
+-------------------------------------- |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widget is not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`IntegratorLearner` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: IntegratorLearner |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+ import holoviews as hv |
|
21 |
+ import numpy as np |
|
22 |
+ |
|
23 |
+This learner learns a 1D function and calculates the integral and error |
|
24 |
+of the integral with it. It is based on Pedro Gonnet’s |
|
25 |
+`implementation <https://www.academia.edu/1976055/Adaptive_quadrature_re-revisited>`__. |
|
26 |
+ |
|
27 |
+Let’s try the following function with cusps (that is difficult to |
|
28 |
+integrate): |
|
29 |
+ |
|
30 |
+.. execute:: |
|
31 |
+ |
|
32 |
+ def f24(x): |
|
33 |
+ return np.floor(np.exp(x)) |
|
34 |
+ |
|
35 |
+ xs = np.linspace(0, 3, 200) |
|
36 |
+ hv.Scatter((xs, [f24(x) for x in xs])) |
|
37 |
+ |
|
38 |
+Just to prove that this really is a difficult-to-integrate function, |
|
39 |
+let’s try a familiar function integrator `scipy.integrate.quad`, which |
|
40 |
+will give us warnings that it encounters difficulties (if we run it |
|
41 |
+in a notebook). |
|
42 |
+ |
|
43 |
+.. execute:: |
|
44 |
+ |
|
45 |
+ import scipy.integrate |
|
46 |
+ scipy.integrate.quad(f24, 0, 3) |
|
47 |
+ |
|
48 |
+We initialize a learner again and pass the bounds and relative tolerance |
|
49 |
+we want to reach. Then in the `~adaptive.Runner` we pass |
|
50 |
+``goal=lambda l: l.done()`` where ``learner.done()`` is ``True`` when |
|
51 |
+the relative tolerance has been reached. |
|
52 |
+ |
|
53 |
+.. execute:: |
|
54 |
+ |
|
55 |
+ from adaptive.runner import SequentialExecutor |
|
56 |
+ |
|
57 |
+ learner = adaptive.IntegratorLearner(f24, bounds=(0, 3), tol=1e-8) |
|
58 |
+ |
|
59 |
+ # We use a SequentialExecutor, which runs the function to be learned in |
|
60 |
+ # *this* process only. This means we don't pay |
|
61 |
+ # the overhead of evaluating the function in another process. |
|
62 |
+ runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.done()) |
|
63 |
+ |
|
64 |
+.. execute:: |
|
65 |
+ :hide-code: |
|
66 |
+ |
|
67 |
+ await runner.task # This is not needed in a notebook environment! |
|
68 |
+ |
|
69 |
+.. execute:: |
|
70 |
+ |
|
71 |
+ runner.live_info() |
|
72 |
+ |
|
73 |
+Now we could do the live plotting again, but let’s just wait until the |
|
74 |
+runner is done. |
|
75 |
+ |
|
76 |
+.. execute:: |
|
77 |
+ |
|
78 |
+ if not runner.task.done(): |
|
79 |
+ raise RuntimeError('Wait for the runner to finish before executing the cells below!') |
|
80 |
+ |
|
81 |
+.. execute:: |
|
82 |
+ |
|
83 |
+ print('The integral value is {} with the corresponding error of {}'.format(learner.igral, learner.err)) |
|
84 |
+ learner.plot() |
0 | 85 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,140 @@ |
1 |
+Tutorial `~adaptive.Learner1D` |
|
2 |
+------------------------------ |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widgets are not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`Learner1D` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: Learner1D |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+ import numpy as np |
|
21 |
+ from functools import partial |
|
22 |
+ import random |
|
23 |
+ |
|
24 |
+scalar output: ``f:ℝ → ℝ`` |
|
25 |
+.......................... |
|
26 |
+ |
|
27 |
+We start with the most common use-case: sampling a 1D function |
|
28 |
+:math:`\ f: ℝ → ℝ`. |
|
29 |
+ |
|
30 |
+We will use the following function, which is a smooth (linear) |
|
31 |
+background with a sharp peak at a random location: |
|
32 |
+ |
|
33 |
+.. execute:: |
|
34 |
+ |
|
35 |
+ offset = random.uniform(-0.5, 0.5) |
|
36 |
+ |
|
37 |
+ def f(x, offset=offset, wait=True): |
|
38 |
+ from time import sleep |
|
39 |
+ from random import random |
|
40 |
+ |
|
41 |
+ a = 0.01 |
|
42 |
+ if wait: |
|
43 |
+ sleep(random()) |
|
44 |
+ return x + a**2 / (a**2 + (x - offset)**2) |
|
45 |
+ |
|
46 |
+We start by initializing a 1D “learner”, which will suggest points to |
|
47 |
+evaluate, and adapt its suggestions as more and more points are |
|
48 |
+evaluated. |
|
49 |
+ |
|
50 |
+.. execute:: |
|
51 |
+ |
|
52 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
53 |
+ |
|
54 |
+Next we create a “runner” that will request points from the learner and |
|
55 |
+evaluate ‘f’ on them. |
|
56 |
+ |
|
57 |
+By default on Unix-like systems the runner will evaluate the points in |
|
58 |
+parallel using local processes `concurrent.futures.ProcessPoolExecutor`. |
|
59 |
+ |
|
60 |
+On Windows systems the runner will try to use a `distributed.Client` |
|
61 |
+if `distributed` is installed. A `~concurrent.futures.ProcessPoolExecutor` |
|
62 |
+cannot be used on Windows, because its spawned worker processes cannot access functions defined in a notebook (Windows lacks ``fork``). |
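+
+If you want to control this yourself you can always pass an executor
+explicitly (not done in this tutorial; a minimal sketch, see the
+parallelism tutorial for more options):
+
+.. code:: python
+
+    from concurrent.futures import ProcessPoolExecutor
+
+    executor = ProcessPoolExecutor(max_workers=4)
+    runner = adaptive.Runner(learner, executor=executor,
+                             goal=lambda l: l.loss() < 0.01)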
|
63 |
+ |
|
64 |
+.. execute:: |
|
65 |
+ |
|
66 |
+ # The end condition is when the "loss" is less than 0.1. In the context of the |
|
67 |
+ # 1D learner this means that we will resolve features in 'func' with width 0.1 or wider. |
|
68 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) |
|
69 |
+ |
|
70 |
+.. execute:: |
|
71 |
+ :hide-code: |
|
72 |
+ |
|
73 |
+ await runner.task # This is not needed in a notebook environment! |
|
74 |
+ |
|
75 |
+When instantiated in a Jupyter notebook the runner does its job in the |
|
76 |
+background and does not block the IPython kernel. We can use this to |
|
77 |
+create a plot that updates as new data arrives: |
|
78 |
+ |
|
79 |
+.. execute:: |
|
80 |
+ |
|
81 |
+ runner.live_info() |
|
82 |
+ |
|
83 |
+.. execute:: |
|
84 |
+ |
|
85 |
+ runner.live_plot(update_interval=0.1) |
|
86 |
+ |
|
87 |
+We can now compare the adaptive sampling to a homogeneous sampling with |
|
88 |
+the same number of points: |
|
89 |
+ |
|
90 |
+.. execute:: |
|
91 |
+ |
|
92 |
+ if not runner.task.done(): |
|
93 |
+ raise RuntimeError('Wait for the runner to finish before executing the cells below!') |
|
94 |
+ |
|
95 |
+.. execute:: |
|
96 |
+ |
|
97 |
+ learner2 = adaptive.Learner1D(f, bounds=learner.bounds) |
|
98 |
+ |
|
99 |
+ xs = np.linspace(*learner.bounds, len(learner.data)) |
|
100 |
+ learner2.tell_many(xs, map(partial(f, wait=False), xs)) |
|
101 |
+ |
|
102 |
+ learner.plot() + learner2.plot() |
|
103 |
+ |
|
104 |
+ |
|
105 |
+vector output: ``f:ℝ → ℝ^N`` |
|
106 |
+............................ |
|
107 |
+ |
|
108 |
+Sometimes you may want to learn a function with vector output: |
|
109 |
+ |
|
110 |
+.. execute:: |
|
111 |
+ |
|
112 |
+ random.seed(0) |
|
113 |
+ offsets = [random.uniform(-0.8, 0.8) for _ in range(3)] |
|
114 |
+ |
|
115 |
+ # sharp peaks at random locations in the domain |
|
116 |
+ def f_levels(x, offsets=offsets): |
|
117 |
+ a = 0.01 |
|
118 |
+ return np.array([offset + x + a**2 / (a**2 + (x - offset)**2) |
|
119 |
+ for offset in offsets]) |
|
120 |
+ |
|
121 |
+``adaptive`` has you covered! The ``Learner1D`` can be used for such |
|
122 |
+functions: |
|
123 |
+ |
|
124 |
+.. execute:: |
|
125 |
+ |
|
126 |
+ learner = adaptive.Learner1D(f_levels, bounds=(-1, 1)) |
|
127 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) |
|
128 |
+ |
|
129 |
+.. execute:: |
|
130 |
+ :hide-code: |
|
131 |
+ |
|
132 |
+ await runner.task # This is not needed in a notebook environment! |
|
133 |
+ |
|
134 |
+.. execute:: |
|
135 |
+ |
|
136 |
+ runner.live_info() |
|
137 |
+ |
|
138 |
+.. execute:: |
|
139 |
+ |
|
140 |
+ runner.live_plot(update_interval=0.1) |
0 | 141 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,75 @@ |
1 |
+Tutorial `~adaptive.Learner2D` |
|
2 |
+------------------------------ |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widgets are not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`Learner2D` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: Learner2D |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+ import numpy as np |
|
21 |
+ from functools import partial |
|
22 |
+ |
|
23 |
+Besides 1D functions, we can also learn 2D functions: |
|
24 |
+:math:`\ f: ℝ^2 → ℝ`. |
|
25 |
+ |
|
26 |
+.. execute:: |
|
27 |
+ |
|
28 |
+ def ring(xy, wait=True): |
|
29 |
+ import numpy as np |
|
30 |
+ from time import sleep |
|
31 |
+ from random import random |
|
32 |
+ if wait: |
|
33 |
+ sleep(random()/10) |
|
34 |
+ x, y = xy |
|
35 |
+ a = 0.2 |
|
36 |
+ return x + np.exp(-(x**2 + y**2 - 0.75**2)**2/a**4) |
|
37 |
+ |
|
38 |
+ learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)]) |
|
39 |
+ |
|
40 |
+.. execute:: |
|
41 |
+ |
|
42 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) |
|
43 |
+ |
|
44 |
+.. execute:: |
|
45 |
+ :hide-code: |
|
46 |
+ |
|
47 |
+ await runner.task # This is not needed in a notebook environment! |
|
48 |
+ |
|
49 |
+.. execute:: |
|
50 |
+ |
|
51 |
+ runner.live_info() |
|
52 |
+ |
|
53 |
+.. execute:: |
|
54 |
+ |
|
55 |
+ def plot(learner): |
|
56 |
+ plot = learner.plot(tri_alpha=0.2) |
|
57 |
+ return (plot.Image + plot.EdgePaths.I + plot).cols(2) |
|
58 |
+ |
|
59 |
+ runner.live_plot(plotter=plot, update_interval=0.1) |
|
60 |
+ |
|
61 |
+.. execute:: |
|
62 |
+ |
|
63 |
+ %%opts EdgePaths (color='w') |
|
64 |
+ |
|
65 |
+ import itertools |
|
66 |
+ |
|
67 |
+ # Create a learner and add data on homogeneous grid, so that we can plot it |
|
68 |
+ learner2 = adaptive.Learner2D(ring, bounds=learner.bounds) |
|
69 |
+ n = int(learner.npoints**0.5) |
|
70 |
+ xs, ys = [np.linspace(*bounds, n) for bounds in learner.bounds] |
|
71 |
+ xys = list(itertools.product(xs, ys)) |
|
72 |
+ learner2.tell_many(xys, map(partial(ring, wait=False), xys)) |
|
73 |
+ |
|
74 |
+ (learner2.plot(n).relabel('Homogeneous grid') + learner.plot().relabel('With adaptive') + |
|
75 |
+ learner2.plot(n, tri_alpha=0.4) + learner.plot(tri_alpha=0.4)).cols(2) |
0 | 76 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,93 @@ |
1 |
+Tutorial `~adaptive.LearnerND` |
|
2 |
+------------------------------ |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widgets are not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`LearnerND` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: LearnerND |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+ import holoviews as hv |
|
21 |
+ import numpy as np |
|
22 |
+ |
|
23 |
+ def dynamicmap_to_holomap(dm): |
|
24 |
+ # XXX: change when https://github.com/ioam/holoviews/issues/3085 |
|
25 |
+ # is fixed. |
|
26 |
+ vals = {d.name: d.values for d in dm.dimensions() if d.values} |
|
27 |
+ return hv.HoloMap(dm.select(**vals)) |
|
28 |
+ |
|
29 |
+Besides 1- and 2-dimensional functions, we can also learn N-D functions: |
|
30 |
+:math:`\ f: ℝ^N → ℝ^M, N \ge 2, M \ge 1`. |
|
31 |
+ |
|
32 |
+Do keep in mind that the speed and |
|
33 |
+`effectiveness <https://en.wikipedia.org/wiki/Curse_of_dimensionality>`__ |
|
34 |
+of the learner drop quickly with an increasing number of dimensions. |
|
35 |
+ |
|
36 |
+.. execute:: |
|
37 |
+ |
|
38 |
+ # this step takes a lot of time, it will finish at about 3300 points, which can take up to 6 minutes |
|
39 |
+ def sphere(xyz): |
|
40 |
+ x, y, z = xyz |
|
41 |
+ a = 0.4 |
|
42 |
+ return x + z**2 + np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2/a**4) |
|
43 |
+ |
|
44 |
+ learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)]) |
|
45 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) |
|
46 |
+ |
|
47 |
+.. execute:: |
|
48 |
+ :hide-code: |
|
49 |
+ |
|
50 |
+ await runner.task # This is not needed in a notebook environment! |
|
51 |
+ |
|
52 |
+.. execute:: |
|
53 |
+ |
|
54 |
+ runner.live_info() |
|
55 |
+ |
|
56 |
+Let’s plot 2D slices of the 3D function |
|
57 |
+ |
|
58 |
+.. execute:: |
|
59 |
+ |
|
60 |
+ def plot_cut(x, direction, learner=learner): |
|
61 |
+ cut_mapping = {'XYZ'.index(direction): x} |
|
62 |
+ return learner.plot_slice(cut_mapping, n=100) |
|
63 |
+ |
|
64 |
+ dm = hv.DynamicMap(plot_cut, kdims=['val', 'direction']) |
|
65 |
+ dm = dm.redim.values(val=np.linspace(-1, 1, 11), direction=list('XYZ')) |
|
66 |
+ |
|
67 |
+ # In a notebook one would run `dm` however we want a statically generated |
|
68 |
+ # html, so we use a HoloMap to display it here |
|
69 |
+ dynamicmap_to_holomap(dm) |
|
70 |
+ |
|
71 |
+Or we can plot 1D slices |
|
72 |
+ |
|
73 |
+.. execute:: |
|
74 |
+ |
|
75 |
+ %%opts Path {+framewise} |
|
76 |
+ def plot_cut(x1, x2, directions, learner=learner): |
|
77 |
+ cut_mapping = {'xyz'.index(d): x for d, x in zip(directions, [x1, x2])} |
|
78 |
+ return learner.plot_slice(cut_mapping) |
|
79 |
+ |
|
80 |
+ dm = hv.DynamicMap(plot_cut, kdims=['v1', 'v2', 'directions']) |
|
81 |
+ dm = dm.redim.values(v1=np.linspace(-1, 1, 6), |
|
82 |
+ v2=np.linspace(-1, 1, 6), |
|
83 |
+ directions=['xy', 'xz', 'yz']) |
|
84 |
+ |
|
85 |
+ # In a notebook one would run `dm` however we want a statically generated |
|
86 |
+ # html, so we use a HoloMap to display it here |
|
87 |
+ dynamicmap_to_holomap(dm) |
|
88 |
+ |
|
89 |
+The plots show some wobbles even though the original function is smooth. This |
|
90 |
+is because the learner chooses points in 3 dimensions, so the simplices |
|
91 |
+generally do not lie in the plane of the slice that we plot |
|
92 |
+here. However, as always, the plots will become gradually smoother as you |
|
93 |
+sample more points. |
0 | 94 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,66 @@ |
1 |
+Tutorial `~adaptive.SKOptLearner` |
|
2 |
+--------------------------------- |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widgets are not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`SKOptLearner` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: SKOptLearner |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+ import holoviews as hv |
|
21 |
+ import numpy as np |
|
22 |
+ |
|
23 |
+We have wrapped the ``Optimizer`` class from |
|
24 |
+`scikit-optimize <https://github.com/scikit-optimize/scikit-optimize>`__, |
|
25 |
+to show how existing libraries can be integrated with ``adaptive``. |
|
26 |
+ |
|
27 |
+The ``SKOptLearner`` attempts to “optimize” the given function ``g`` |
|
28 |
+(i.e. find the global minimum of ``g`` in the window of interest). |
|
29 |
+ |
|
30 |
+Here we use the same example as in the ``scikit-optimize`` |
|
31 |
+`tutorial <https://github.com/scikit-optimize/scikit-optimize/blob/master/examples/ask-and-tell.ipynb>`__. |
|
32 |
+Although ``SKOptLearner`` can optimize functions of arbitrary |
|
33 |
+dimensionality, we can only plot the learner if a 1D function is being |
|
34 |
+learned. |
|
35 |
+ |
|
36 |
+.. execute:: |
|
37 |
+ |
|
38 |
+ def F(x, noise_level=0.1): |
|
39 |
+ return (np.sin(5 * x) * (1 - np.tanh(x ** 2)) |
|
40 |
+ + np.random.randn() * noise_level) |
|
41 |
+ |
|
42 |
+.. execute:: |
|
43 |
+ |
|
44 |
+ learner = adaptive.SKOptLearner(F, dimensions=[(-2., 2.)], |
|
45 |
+ base_estimator="GP", |
|
46 |
+ acq_func="gp_hedge", |
|
47 |
+ acq_optimizer="lbfgs", |
|
48 |
+ ) |
|
49 |
+ runner = adaptive.Runner(learner, ntasks=1, goal=lambda l: l.npoints > 40) |
|
50 |
+ |
|
51 |
+.. execute:: |
|
52 |
+ :hide-code: |
|
53 |
+ |
|
54 |
+ await runner.task # This is not needed in a notebook environment! |
|
55 |
+ |
|
56 |
+.. execute:: |
|
57 |
+ |
|
58 |
+ runner.live_info() |
|
59 |
+ |
|
60 |
+.. execute:: |
|
61 |
+ |
|
62 |
+ %%opts Overlay [legend_position='top'] |
|
63 |
+ xs = np.linspace(*learner.space.bounds[0]) |
|
64 |
+ to_learn = hv.Curve((xs, [F(x, 0) for x in xs]), label='to learn') |
|
65 |
+ |
|
66 |
+ runner.live_plot().relabel('prediction', depth=2) * to_learn |
0 | 67 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,340 @@ |
1 |
+Advanced Topics |
|
2 |
+=============== |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widgets are not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`advanced-topics` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: advanced-topics |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+ from functools import partial |
|
21 |
+ import random |
|
22 |
+ |
|
23 |
+ offset = random.uniform(-0.5, 0.5) |
|
24 |
+ |
|
25 |
+ def f(x, offset=offset, wait=True): |
|
26 |
+ from time import sleep |
|
27 |
+ from random import random |
|
28 |
+ |
|
29 |
+ a = 0.01 |
|
30 |
+ if wait: |
|
31 |
+ sleep(random()) |
|
32 |
+ return x + a**2 / (a**2 + (x - offset)**2) |
|
33 |
+ |
|
34 |
+ |
|
35 |
+A watched pot never boils! |
|
36 |
+-------------------------- |
|
37 |
+ |
|
38 |
+`adaptive.Runner` does its work in an `asyncio` task that runs |
|
39 |
+concurrently with the IPython kernel, when using ``adaptive`` from a |
|
40 |
+Jupyter notebook. This is advantageous because it allows us to do things |
|
41 |
+like live-updating plots; however, it can trip you up if you’re not |
|
42 |
+careful. |
|
43 |
+ |
|
44 |
+Notably: **if you block the IPython kernel, the runner will not do any |
|
45 |
+work**. |
|
46 |
+ |
|
47 |
+For example if you wanted to wait for a runner to complete, **do not |
|
48 |
+wait in a busy loop**: |
|
49 |
+ |
|
50 |
+.. code:: python |
|
51 |
+ |
|
52 |
+ while not runner.task.done(): |
|
53 |
+ pass |
|
54 |
+ |
|
55 |
+If you do this then **the runner will never finish**. |
|
56 |
+ |
|
57 |
+What to do if you don’t care about live plotting, and just want to run |
|
58 |
+something until its done? |
|
59 |
+ |
|
60 |
+The simplest way to accomplish this is to use |
|
61 |
+`adaptive.BlockingRunner`: |
|
62 |
+ |
|
63 |
+.. execute:: |
|
64 |
+ |
|
65 |
+ learner = adaptive.Learner1D(partial(f, wait=False), bounds=(-1, 1)) |
|
66 |
+ adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) |
|
67 |
+ # This will only get run after the runner has finished |
|
68 |
+ learner.plot() |
|
69 |
+ |
|
70 |
+Reproducibility |
|
71 |
+--------------- |
|
72 |
+ |
|
73 |
+By default ``adaptive`` runners evaluate the learned function in |
|
74 |
+parallel across several cores. The runners are also opportunistic, in |
|
75 |
+that as soon as a result is available they will feed it to the learner |
|
76 |
+and request another point to replace the one that just finished. |
|
77 |
+ |
|
78 |
+Because the order in which computations complete is non-deterministic, |
|
79 |
+this means that the runner behaves in a non-deterministic way. Adaptive |
|
80 |
+makes this choice because in many cases the speedup from parallel |
|
81 |
+execution is worth sacrificing the “purity” of exactly reproducible |
|
82 |
+computations. |
|
83 |
+ |
|
84 |
+Nevertheless it is still possible to run a learner in a deterministic |
|
85 |
+way with adaptive. |
|
86 |
+ |
|
87 |
+The simplest way is to use `adaptive.runner.simple` to run your |
|
88 |
+learner: |
|
89 |
+ |
|
90 |
+.. execute:: |
|
91 |
+ |
|
92 |
+ learner = adaptive.Learner1D(partial(f, wait=False), bounds=(-1, 1)) |
|
93 |
+ |
|
94 |
+ # blocks until completion |
|
95 |
+ adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01) |
|
96 |
+ |
|
97 |
+ learner.plot() |
|
98 |
+ |
|
99 |
+Note that unlike `adaptive.Runner`, `adaptive.runner.simple` |
|
100 |
+*blocks* until it is finished. |
|
101 |
+ |
|
102 |
+If you want to enable determinism but want to continue using the |
|
103 |
+non-blocking `adaptive.Runner`, you can use the |
|
104 |
+`adaptive.runner.SequentialExecutor`: |
|
105 |
+ |
|
106 |
+.. execute:: |
|
107 |
+ |
|
108 |
+ from adaptive.runner import SequentialExecutor |
|
109 |
+ |
|
110 |
+ learner = adaptive.Learner1D(partial(f, wait=False), bounds=(-1, 1)) |
|
111 |
+ |
|
112 |
+ runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.01) |
|
113 |
+ |
|
114 |
+.. execute:: |
|
115 |
+ :hide-code: |
|
116 |
+ |
|
117 |
+ await runner.task # This is not needed in a notebook environment! |
|
118 |
+ |
|
119 |
+.. execute:: |
|
120 |
+ |
|
121 |
+ runner.live_info() |
|
122 |
+ |
|
123 |
+.. execute:: |
|
124 |
+ |
|
125 |
+ runner.live_plot(update_interval=0.1) |
|
126 |
+ |
|
127 |
+Cancelling a runner |
|
128 |
+------------------- |
|
129 |
+ |
|
130 |
+Sometimes you want to interactively explore a parameter space, and want |
|
131 |
+the function to be evaluated at finer and finer resolution and manually |
|
132 |
+control when the calculation stops. |
|
133 |
+ |
|
134 |
+If no ``goal`` is provided to a runner then the runner will run until |
|
135 |
+cancelled. |
|
136 |
+ |
|
137 |
+``runner.live_info()`` will provide a button that can be clicked to stop |
|
138 |
+the runner. You can also stop the runner programmatically using |
|
139 |
+``runner.cancel()``. |
|
140 |
+ |
|
141 |
+.. execute:: |
|
142 |
+ |
|
143 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
144 |
+ runner = adaptive.Runner(learner) |
|
145 |
+ |
|
146 |
+.. execute:: |
|
147 |
+ :hide-code: |
|
148 |
+ |
|
149 |
+ import asyncio |
|
150 |
+ await asyncio.sleep(3) # This is not needed in the notebook! |
|
151 |
+ |
|
152 |
+.. execute:: |
|
153 |
+ |
|
154 |
+ runner.cancel() # Let's execute this after 3 seconds |
|
155 |
+ |
|
156 |
+.. execute:: |
|
157 |
+ |
|
158 |
+ runner.live_info() |
|
159 |
+ |
|
160 |
+.. execute:: |
|
161 |
+ |
|
162 |
+ runner.live_plot(update_interval=0.1) |
|
163 |
+ |
|
164 |
+.. execute:: |
|
165 |
+ |
|
166 |
+ print(runner.status()) |
|
167 |
+ |
|
168 |
+Debugging Problems |
|
169 |
+------------------ |
|
170 |
+ |
|
171 |
+Runners work in the background with respect to the IPython kernel, which |
|
172 |
+makes it convenient, but also means that inspecting errors is more |
|
173 |
+difficult because exceptions will not be raised directly in the |
|
174 |
+notebook. Often the only indication you will have that something has |
|
175 |
+gone wrong is that nothing will be happening. |
|
176 |
+ |
|
177 |
+Let’s look at the following example, where the function to be learned |
|
178 |
+will raise an exception 10% of the time. |
|
179 |
+ |
|
180 |
+.. execute:: |
|
181 |
+ |
|
182 |
+ def will_raise(x): |
|
183 |
+ from random import random |
|
184 |
+ from time import sleep |
|
185 |
+ |
|
186 |
+ sleep(random()) |
|
187 |
+ if random() < 0.1: |
|
188 |
+ raise RuntimeError('something went wrong!') |
|
189 |
+ return x**2 |
|
190 |
+ |
|
191 |
+ learner = adaptive.Learner1D(will_raise, (-1, 1)) |
|
192 |
+ runner = adaptive.Runner(learner) # without 'goal' the runner will run forever unless cancelled |
|
193 |
+ |
|
194 |
+ |
|
195 |
+.. execute:: |
|
196 |
+ :hide-code: |
|
197 |
+ |
|
198 |
+ import asyncio |
|
199 |
+ await asyncio.sleep(4) # in 4 seconds it will surely have failed |
|
200 |
+ |
|
201 |
+.. execute:: |
|
202 |
+ |
|
203 |
+ runner.live_info() |
|
204 |
+ |
|
205 |
+.. execute:: |
|
206 |
+ |
|
207 |
+ runner.live_plot() |
|
208 |
+ |
|
209 |
+The above runner should continue forever, but we notice that it stops |
|
210 |
+after a few points are evaluated. |
|
211 |
+ |
|
212 |
+First we should check that the runner has really finished: |
|
213 |
+ |
|
214 |
+.. execute:: |
|
215 |
+ |
|
216 |
+ runner.task.done() |
|
217 |
+ |
|
218 |
+If it has indeed finished then we should check the ``result`` of the |
|
219 |
+runner. This should be ``None`` if the runner stopped successfully. If |
|
220 |
+the runner stopped due to an exception then asking for the result will |
|
221 |
+raise the exception with the stack trace: |
|
222 |
+ |
|
223 |
+.. execute:: |
|
224 |
+ |
|
225 |
+ runner.task.result() |
|
226 |
+ |
|
227 |
+Logging runners |
|
228 |
+~~~~~~~~~~~~~~~ |
|
229 |
+ |
|
230 |
+Runners do their job in the background, which makes introspection quite |
|
231 |
+cumbersome. One way to inspect runners is to instantiate one with |
|
232 |
+``log=True``: |
|
233 |
+ |
|
234 |
+.. execute:: |
|
235 |
+ |
|
236 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
237 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1, |
|
238 |
+ log=True) |
|
239 |
+ |
|
240 |
+.. execute:: |
|
241 |
+ :hide-code: |
|
242 |
+ |
|
243 |
+ await runner.task # This is not needed in a notebook environment! |
|
244 |
+ |
|
245 |
+.. execute:: |
|
246 |
+ |
|
247 |
+ runner.live_info() |
|
248 |
+ |
|
249 |
+This gives the runner a ``log`` attribute, which is a list of the |
|
250 |
+``learner`` methods that were called, as well as their arguments. This |
|
251 |
+is useful because executors typically execute their tasks in a |
|
252 |
+non-deterministic order. |
|
253 |
+ |
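+For example, the first few entries of the log can be inspected directly
+(what they look like exactly depends on the run):
+
+.. execute::
+
+    runner.log[:5]
+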
|
254 |
+This can be used with `adaptive.runner.replay_log` to perfom the same |
|
255 |
+set of operations on another runner: |
|
256 |
+ |
|
257 |
+.. execute:: |
|
258 |
+ |
|
259 |
+ reconstructed_learner = adaptive.Learner1D(f, bounds=learner.bounds) |
|
260 |
+ adaptive.runner.replay_log(reconstructed_learner, runner.log) |
|
261 |
+ |
|
262 |
+.. execute:: |
|
263 |
+ |
|
264 |
+ learner.plot().Scatter.I.opts(style=dict(size=6)) * reconstructed_learner.plot() |
|
265 |
+ |
|
266 |
+Timing functions |
|
267 |
+~~~~~~~~~~~~~~~~ |
|
268 |
+ |
|
269 |
+To time the runner you **cannot** simply use |
|
270 |
+ |
|
271 |
+.. code:: python |
|
272 |
+ |
|
273 |
+ now = datetime.now() |
|
274 |
+ runner = adaptive.Runner(...) |
|
275 |
+ print(datetime.now() - now) |
|
276 |
+ |
|
277 |
+because creating the runner returns immediately. Also blocking the kernel with |
|
278 |
+``while not runner.task.done()`` will not work because the runner will |
|
279 |
+not do anything when the kernel is blocked. |
|
280 |
+ |
|
281 |
+Therefore you need to create an ``async`` function and hook it into the |
|
282 |
+``ioloop`` like so: |
|
283 |
+ |
|
284 |
+.. execute:: |
|
285 |
+ |
|
286 |
+ import asyncio |
|
287 |
+ |
|
288 |
+ async def time(runner): |
|
289 |
+ from datetime import datetime |
|
290 |
+ now = datetime.now() |
|
291 |
+ await runner.task |
|
292 |
+ return datetime.now() - now |
|
293 |
+ |
|
294 |
+ ioloop = asyncio.get_event_loop() |
|
295 |
+ |
|
296 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
297 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) |
|
298 |
+ |
|
299 |
+ timer = ioloop.create_task(time(runner)) |
|
300 |
+ |
|
301 |
+.. execute:: |
|
302 |
+ :hide-code: |
|
303 |
+ |
|
304 |
+ await runner.task # This is not needed in a notebook environment! |
|
305 |
+ |
|
306 |
+.. execute:: |
|
307 |
+ |
|
308 |
+ # The result will only be set when the runner is done. |
|
309 |
+ timer.result() |
|
310 |
+ |
|
311 |
+Using Runners from a script |
|
312 |
+--------------------------- |
|
313 |
+ |
|
314 |
+Runners can also be used from a Python script independently of the |
|
315 |
+notebook. |
|
316 |
+ |
|
317 |
+The simplest way to accomplish this is to use the |
|
318 |
+`~adaptive.BlockingRunner`: |
|
319 |
+ |
|
320 |
+.. code:: python |
|
321 |
+ |
|
322 |
+ import adaptive |
|
323 |
+ |
|
324 |
+ def f(x): |
|
325 |
+ return x |
|
326 |
+ |
|
327 |
+ learner = adaptive.Learner1D(f, (-1, 1)) |
|
328 |
+ |
|
329 |
+ adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.1) |
|
330 |
+ |
|
331 |
+If you use `asyncio` already in your script and want to integrate |
|
332 |
+``adaptive`` into it, then you can use the default `~adaptive.Runner` as you |
|
333 |
+would from a notebook. If you want to wait for the runner to finish, |
|
334 |
+then you can simply |
|
335 |
+ |
|
336 |
+.. code:: python |
|
337 |
+ |
|
338 |
+ await runner.task |
|
339 |
+ |
|
340 |
+from within a coroutine. |
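+
+A minimal sketch of what such a script could look like (the function and
+the goal are just placeholders):
+
+.. code:: python
+
+    import asyncio
+    import adaptive
+
+    def f(x):
+        return x
+
+    learner = adaptive.Learner1D(f, (-1, 1))
+    runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1)
+
+    ioloop = asyncio.get_event_loop()
+    ioloop.run_until_complete(runner.task)  # block until the goal is reached
+    print(learner.loss())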
0 | 341 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,176 @@ |
1 |
+Custom adaptive logic for 1D and 2D |
|
2 |
+----------------------------------- |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widgets are not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`custom-loss-function` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: custom-loss-function |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+ # Import modules that are used in multiple cells |
|
21 |
+ import numpy as np |
|
22 |
+ from functools import partial |
|
23 |
+ |
|
24 |
+ |
|
25 |
+`~adaptive.Learner1D` and `~adaptive.Learner2D` both work on the principle of |
|
26 |
+subdividing their domain into subdomains, and assigning a property to |
|
27 |
+each subdomain, which we call the *loss*. The algorithm for choosing the |
|
28 |
+best place to evaluate our function is then simply *take the subdomain |
|
29 |
+with the largest loss and add a point in the center, creating new |
|
30 |
+subdomains around this point*. |
|
31 |
+ |
|
32 |
+The *loss function* that defines the loss per subdomain is the canonical |
|
33 |
+place to define what regions of the domain are “interesting”. The |
|
34 |
+default loss function for `~adaptive.Learner1D` and `~adaptive.Learner2D` is sufficient |
|
35 |
+for a wide range of common cases, but it is by no means a panacea. For |
|
36 |
+example, the default loss function will tend to get stuck on |
|
37 |
+divergences. |
|
38 |
+ |
|
39 |
+Both the `~adaptive.Learner1D` and `~adaptive.Learner2D` allow you to specify a *custom |
|
40 |
+loss function*. Below we illustrate how you would go about writing your |
|
41 |
+own loss function. The documentation for `~adaptive.Learner1D` and `~adaptive.Learner2D` |
|
42 |
+specifies the signature that your loss function needs to have in order |
|
43 |
+for it to work with ``adaptive``. |
|
44 |
+ |
|
45 |
+tl;dr, one can use the following *loss functions* that |
|
46 |
+**we** already implemented: |
|
47 |
+ |
|
48 |
++ `adaptive.learner.learner1D.default_loss` |
|
49 |
++ `adaptive.learner.learner1D.uniform_loss` |
|
50 |
++ `adaptive.learner.learner2D.default_loss` |
|
51 |
++ `adaptive.learner.learner2D.uniform_loss` |
|
52 |
++ `adaptive.learner.learner2D.minimize_triangle_surface_loss` |
|
53 |
++ `adaptive.learner.learner2D.resolution_loss` |
|
54 |
+ |
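+Passing one of these to a learner is a single keyword argument, for
+example (a minimal sketch, for some function ``f``):
+
+.. code:: python
+
+    from adaptive.learner.learner1D import uniform_loss
+
+    learner = adaptive.Learner1D(f, bounds=(-1, 1),
+                                 loss_per_interval=uniform_loss)
+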
|
55 |
+ |
|
56 |
+Uniform sampling |
|
57 |
+~~~~~~~~~~~~~~~~ |
|
58 |
+ |
|
59 |
+Say we want to properly sample a function that contains divergences. A |
|
60 |
+simple (but naive) strategy is to *uniformly* sample the domain: |
|
61 |
+ |
|
62 |
+.. execute:: |
|
63 |
+ |
|
64 |
+ def uniform_sampling_1d(interval, scale, function_values): |
|
65 |
+ # Note that we never use 'function_values'; the loss is just the size of the subdomain |
|
66 |
+ x_left, x_right = interval |
|
67 |
+ x_scale, _ = scale |
|
68 |
+ dx = (x_right - x_left) / x_scale |
|
69 |
+ return dx |
|
70 |
+ |
|
71 |
+ def f_divergent_1d(x): |
|
72 |
+ return 1 / x**2 |
|
73 |
+ |
|
74 |
+ learner = adaptive.Learner1D(f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d) |
|
75 |
+ runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) |
|
76 |
+ learner.plot().select(y=(0, 10000)) |
|
77 |
+ |
|
78 |
+.. execute:: |
|
79 |
+ |
|
80 |
+ %%opts EdgePaths (color='w') Image [logz=True colorbar=True] |
|
81 |
+ |
|
82 |
+ from adaptive.runner import SequentialExecutor |
|
83 |
+ |
|
84 |
+ def uniform_sampling_2d(ip): |
|
85 |
+ from adaptive.learner.learner2D import areas |
|
86 |
+ A = areas(ip) |
|
87 |
+ return np.sqrt(A) |
|
88 |
+ |
|
89 |
+ def f_divergent_2d(xy): |
|
90 |
+ x, y = xy |
|
91 |
+ return 1 / (x**2 + y**2) |
|
92 |
+ |
|
93 |
+ learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=uniform_sampling_2d) |
|
94 |
+ |
|
95 |
+ # this takes a while, so use the async Runner so we know *something* is happening |
|
96 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.02) |
|
97 |
+ |
|
98 |
+.. execute:: |
|
99 |
+ :hide-code: |
|
100 |
+ |
|
101 |
+ await runner.task # This is not needed in a notebook environment! |
|
102 |
+ |
|
103 |
+.. execute:: |
|
104 |
+ |
|
105 |
+ runner.live_info() |
|
106 |
+ |
|
107 |
+.. execute:: |
|
108 |
+ |
|
109 |
+ plotter = lambda l: l.plot(tri_alpha=0.3).relabel( |
|
110 |
+ '1 / (x^2 + y^2) in log scale') |
|
111 |
+ runner.live_plot(update_interval=0.2, plotter=plotter) |
|
112 |
+ |
|
113 |
+The uniform sampling strategy is a common case to benchmark against, so |
|
114 |
+the 1D and 2D versions are included in ``adaptive`` as |
|
115 |
+`adaptive.learner.learner1D.uniform_loss` and |
|
116 |
+`adaptive.learner.learner2D.uniform_loss`. |
|
117 |
+ |
|
118 |
+Doing better |
|
119 |
+~~~~~~~~~~~~ |
|
120 |
+ |
|
121 |
+Of course, using ``adaptive`` for uniform sampling is a bit of a waste! |
|
122 |
+ |
|
123 |
+Let’s see if we can do a bit better. Below we define a loss per |
|
124 |
+subdomain that scales with the degree of nonlinearity of the function |
|
125 |
+(this is very similar to the default loss function for `~adaptive.Learner2D`), |
|
126 |
+but which is 0 for subdomains smaller than a certain area, and infinite |
|
127 |
+for subdomains larger than a certain area. |
|
128 |
+ |
|
129 |
+A loss defined in this way means that the adaptive algorithm will first |
|
130 |
+prioritise subdomains that are too large (infinite loss). After all |
|
131 |
+subdomains are appropriately small it will prioritise places where the |
|
132 |
+function is very nonlinear, but will ignore subdomains that are too |
|
133 |
+small (0 loss). |
|
134 |
+ |
|
135 |
+.. execute:: |
|
136 |
+ |
|
137 |
+ %%opts EdgePaths (color='w') Image [logz=True colorbar=True] |
|
138 |
+ |
|
139 |
+ def resolution_loss(ip, min_distance=0, max_distance=1): |
|
140 |
+ """min_distance and max_distance should be in between 0 and 1 |
|
141 |
+ because the total area is normalized to 1.""" |
|
142 |
+ |
|
143 |
+ from adaptive.learner.learner2D import areas, deviations |
|
144 |
+ |
|
145 |
+ A = areas(ip) |
|
146 |
+ |
|
147 |
+ # 'deviations' returns an array of shape '(n, len(ip))', where |
|
148 |
+ # 'n' is the dimension of the output of the learned function. |
|
149 |
+ # In this case we know that the learned function returns a scalar, |
|
150 |
+ # so 'deviations' returns an array of shape '(1, len(ip))'. |
|
151 |
+ # It represents the deviation of the function value from a linear estimate |
|
152 |
+ # over each triangular subdomain. |
|
153 |
+ dev = deviations(ip)[0] |
|
154 |
+ |
|
155 |
+ # we add terms of the same dimension: dev == [distance], A == [distance**2] |
|
156 |
+ loss = np.sqrt(A) * dev + A |
|
157 |
+ |
|
158 |
+ # Setting areas with a small area to zero such that they won't be chosen again |
|
159 |
+ loss[A < min_distance**2] = 0 |
|
160 |
+ |
|
161 |
+ # Setting triangles that have a size larger than max_distance to infinite loss |
|
162 |
+ loss[A > max_distance**2] = np.inf |
|
163 |
+ |
|
164 |
+ return loss |
|
165 |
+ |
|
166 |
+ loss = partial(resolution_loss, min_distance=0.01) |
|
167 |
+ |
|
168 |
+ learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss) |
|
169 |
+ runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02) |
|
170 |
+ learner.plot(tri_alpha=0.3).relabel('1 / (x^2 + y^2) in log scale') |
|
171 |
+ |
|
172 |
+Awesome! We zoom in on the singularity, but not at the expense of |
|
173 |
+sampling the rest of the domain a reasonable amount. |
|
174 |
+ |
|
175 |
+The above strategy is available as |
|
176 |
+`adaptive.learner.learner2D.resolution_loss`. |
0 | 177 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,55 @@ |
1 |
+Parallelism - using multiple cores |
|
2 |
+---------------------------------- |
|
3 |
+ |
|
4 |
+Often you will want to evaluate the function on some remote computing |
|
5 |
+resources. ``adaptive`` works out of the box with any framework that |
|
6 |
+implements a `PEP 3148 <https://www.python.org/dev/peps/pep-3148/>`__ |
|
7 |
+compliant executor that returns `concurrent.futures.Future` objects. |
|
8 |
+ |
|
9 |
+`concurrent.futures` |
|
10 |
+~~~~~~~~~~~~~~~~~~~~ |
|
11 |
+ |
|
12 |
+On Unix-like systems by default `adaptive.Runner` creates a |
|
13 |
+`~concurrent.futures.ProcessPoolExecutor`, but you can also pass |
|
14 |
+one explicitly e.g. to limit the number of workers: |
|
15 |
+ |
|
16 |
+.. code:: python |
|
17 |
+ |
|
18 |
+ from concurrent.futures import ProcessPoolExecutor |
|
19 |
+ |
|
20 |
+ executor = ProcessPoolExecutor(max_workers=4) |
|
21 |
+ |
|
22 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
23 |
+ runner = adaptive.Runner(learner, executor=executor, goal=lambda l: l.loss() < 0.05) |
|
24 |
+ runner.live_info() |
|
25 |
+ runner.live_plot(update_interval=0.1) |
|
26 |
+ |
|
27 |
+`ipyparallel.Client` |
|
28 |
+~~~~~~~~~~~~~~~~~~~~ |
|
29 |
+ |
|
30 |
+.. code:: python |
|
31 |
+ |
|
32 |
+ import ipyparallel |
|
33 |
+ |
|
34 |
+ client = ipyparallel.Client() # You will need to start an `ipcluster` to make this work |
|
35 |
+ |
|
36 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
37 |
+ runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01) |
|
38 |
+ runner.live_info() |
|
39 |
+ runner.live_plot() |
|
40 |
+ |
|
41 |
+`distributed.Client` |
|
42 |
+~~~~~~~~~~~~~~~~~~~~ |
|
43 |
+ |
|
44 |
+On Windows by default `adaptive.Runner` uses a `distributed.Client`. |
|
45 |
+ |
|
46 |
+.. code:: python |
|
47 |
+ |
|
48 |
+ import distributed |
|
49 |
+ |
|
50 |
+ client = distributed.Client() |
|
51 |
+ |
|
52 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
53 |
+ runner = adaptive.Runner(learner, executor=client, goal=lambda l: l.loss() < 0.01) |
|
54 |
+ runner.live_info() |
|
55 |
+ runner.live_plot(update_interval=0.1) |
0 | 56 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,49 @@ |
1 |
+Tutorial Adaptive |
|
2 |
+================= |
|
3 |
+ |
|
4 |
+.. warning:: |
|
5 |
+ This documentation is not functional yet! Once |
|
6 |
+ `this Pull Request <https://github.com/jupyter-widgets/jupyter-sphinx/pull/22/>`__ |
|
7 |
+ is merged, the documentation will be built correctly. |
|
8 |
+ |
|
9 |
+`Adaptive <https://gitlab.kwant-project.org/qt/adaptive-evaluation>`__ |
|
10 |
+is a package for adaptively sampling functions with support for parallel |
|
11 |
+evaluation. |
|
12 |
+ |
|
13 |
+This is an introductory notebook that shows some basic use cases. |
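+
+In a nutshell, usage looks like this (a minimal sketch; the learners and
+runners are introduced in the tutorial pages below):
+
+.. code:: python
+
+    import adaptive
+    adaptive.notebook_extension()
+
+    def peak(x, a=0.01):
+        return x + a**2 / (a**2 + x**2)
+
+    learner = adaptive.Learner1D(peak, bounds=(-1, 1))
+    runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)
+    runner.live_plot()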
|
14 |
+ |
|
15 |
+``adaptive`` needs at least Python 3.6, and the following packages: |
|
16 |
+ |
|
17 |
+- ``scipy`` |
|
18 |
+- ``sortedcontainers`` |
|
19 |
+ |
|
20 |
+Additionally ``adaptive`` has lots of extra functionality that makes it |
|
21 |
+simple to use from Jupyter notebooks. This extra functionality depends |
|
22 |
+on the following packages: |
|
23 |
+ |
|
24 |
+- ``ipykernel>=4.8.0`` |
|
25 |
+- ``jupyter_client>=5.2.2`` |
|
26 |
+- ``holoviews`` |
|
27 |
+- ``bokeh`` |
|
28 |
+- ``ipywidgets`` |
|
29 |
+ |
|
30 |
+ |
|
31 |
+.. note:: |
|
32 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
33 |
+ and ``live_info`` widgets are not live. Download the notebooks |
|
34 |
+ in order to see the real behaviour. |
|
35 |
+ |
|
36 |
+.. toctree:: |
|
37 |
+ :hidden: |
|
38 |
+ |
|
39 |
+ tutorial.Learner1D |
|
40 |
+ tutorial.Learner2D |
|
41 |
+ tutorial.custom_loss |
|
42 |
+ tutorial.AverageLearner |
|
43 |
+ tutorial.BalancingLearner |
|
44 |
+ tutorial.DataSaver |
|
45 |
+ tutorial.IntegratorLearner |
|
46 |
+ tutorial.LearnerND |
|
47 |
+ tutorial.SKOptLearner |
|
48 |
+ tutorial.parallelism |
|
49 |
+ tutorial.advanced-topics |