... | ... |
@@ -297,12 +297,12 @@ raise the exception with the stack trace: |
297 | 297 |
runner.task.result() |
298 | 298 |
|
299 | 299 |
|
300 |
-You can also check ``runner.tracebacks`` which is a mapping from |
|
301 |
-point → traceback. |
|
300 |
+You can also check ``runner.tracebacks`` which is a list of tuples with |
|
301 |
+(point, traceback). |
|
302 | 302 |
|
303 | 303 |
.. jupyter-execute:: |
304 | 304 |
|
305 |
- for point, tb in runner.tracebacks.items(): |
|
305 |
+ for point, tb in runner.tracebacks: |
|
306 | 306 |
print(f'point: {point}:\n {tb}') |
307 | 307 |
|
308 | 308 |
Logging runners |
... | ... |
@@ -413,7 +413,7 @@ The simplest way to accomplish this is simply to use the |
413 | 413 |
|
414 | 414 |
learner = adaptive.Learner1D(f, (-1, 1)) |
415 | 415 |
|
416 |
- adaptive.BlockingRunner(learner, goal=lambda: l: l.loss() < 0.1) |
|
416 |
+ adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.1) |
|
417 | 417 |
|
418 | 418 |
If you use `asyncio` already in your script and want to integrate |
419 | 419 |
``adaptive`` into it, then you can use the default `~adaptive.Runner` as you |
... | ... |
@@ -33,13 +33,10 @@ Saving and loading learners |
33 | 33 |
Every learner has a `~adaptive.BaseLearner.save` and `~adaptive.BaseLearner.load` |
34 | 34 |
method that can be used to save and load **only** the data of a learner. |
35 | 35 |
|
36 |
-There are **two ways** of naming the files: 1. Using the ``fname`` |
|
37 |
-argument in ``learner.save(fname=...)`` 2. Setting the ``fname`` |
|
38 |
-attribute, like ``learner.fname = 'data/example.p`` and then |
|
39 |
-``learner.save()`` |
|
36 |
+Use the ``fname`` argument in ``learner.save(fname=...)``. |
|
40 | 37 |
|
41 |
-The second way *must be used* when saving the ``learner``\s of a |
|
42 |
-`~adaptive.BalancingLearner`. |
|
38 |
+Or, when using a `~adaptive.BalancingLearner` one can use either a callable |
|
39 |
+that takes the child learner and returns a filename **or** a list of filenames. |
|
43 | 40 |
|
44 | 41 |
By default the resulting pickle files are compressed, to turn this off |
45 | 42 |
use ``learner.save(fname=..., compress=False)`` |
... | ... |
@@ -298,6 +298,15 @@ raise the exception with the stack trace: |
298 | 298 |
|
299 | 299 |
runner.task.result() |
300 | 300 |
|
301 |
+ |
|
302 |
+You can also check ``runner.tracebacks`` which is a mapping from |
|
303 |
+point → traceback. |
|
304 |
+ |
|
305 |
+.. jupyter-execute:: |
|
306 |
+ |
|
307 |
+ for point, tb in runner.tracebacks.items(): |
|
308 |
+ print(f'point: {point}:\n {tb}') |
|
309 |
+ |
|
301 | 310 |
Logging runners |
302 | 311 |
~~~~~~~~~~~~~~~ |
303 | 312 |
|
... | ... |
@@ -337,10 +346,16 @@ set of operations on another runner: |
337 | 346 |
|
338 | 347 |
learner.plot().Scatter.I.opts(style=dict(size=6)) * reconstructed_learner.plot() |
339 | 348 |
|
340 |
-Timing functions |
|
341 |
-~~~~~~~~~~~~~~~~ |
|
349 |
+Adding coroutines |
|
350 |
+----------------- |
|
351 |
+ |
|
352 |
+In the following example we'll add a `~asyncio.Task` that times the runner. |
|
353 |
+This is *only* for demonstration purposes because one can simply |
|
354 |
+check ``runner.elapsed_time()`` or use the ``runner.live_info()`` |
|
355 |
+widget to see the time since the runner has started. |
|
342 | 356 |
|
343 |
-To time the runner you **cannot** simply use |
|
357 |
+So let's get on with the example. To time the runner |
|
358 |
+you **cannot** simply use |
|
344 | 359 |
|
345 | 360 |
.. code:: python |
346 | 361 |
|
... | ... |
@@ -8,11 +8,10 @@ Advanced Topics |
8 | 8 |
|
9 | 9 |
.. seealso:: |
10 | 10 |
The complete source code of this tutorial can be found in |
11 |
- :jupyter-download:notebook:`advanced-topics` |
|
11 |
+ :jupyter-download:notebook:`tutorial.advanced-topics` |
|
12 | 12 |
|
13 |
-.. execute:: |
|
13 |
+.. jupyter-execute:: |
|
14 | 14 |
:hide-code: |
15 |
- :new-notebook: advanced-topics |
|
16 | 15 |
|
17 | 16 |
import adaptive |
18 | 17 |
adaptive.notebook_extension() |
... | ... |
@@ -45,7 +44,7 @@ The second way *must be used* when saving the ``learner``\s of a |
45 | 44 |
By default the resulting pickle files are compressed, to turn this off |
46 | 45 |
use ``learner.save(fname=..., compress=False)`` |
47 | 46 |
|
48 |
-.. execute:: |
|
47 |
+.. jupyter-execute:: |
|
49 | 48 |
|
50 | 49 |
# Let's create two learners and run only one. |
51 | 50 |
learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
... | ... |
@@ -54,16 +53,16 @@ use ``learner.save(fname=..., compress=False)`` |
54 | 53 |
# Let's only run the learner |
55 | 54 |
runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) |
56 | 55 |
|
57 |
-.. execute:: |
|
56 |
+.. jupyter-execute:: |
|
58 | 57 |
:hide-code: |
59 | 58 |
|
60 | 59 |
await runner.task # This is not needed in a notebook environment! |
61 | 60 |
|
62 |
-.. execute:: |
|
61 |
+.. jupyter-execute:: |
|
63 | 62 |
|
64 | 63 |
runner.live_info() |
65 | 64 |
|
66 |
-.. execute:: |
|
65 |
+.. jupyter-execute:: |
|
67 | 66 |
|
68 | 67 |
fname = 'data/example_file.p' |
69 | 68 |
learner.save(fname) |
... | ... |
@@ -74,7 +73,7 @@ use ``learner.save(fname=..., compress=False)`` |
74 | 73 |
|
75 | 74 |
Or just (without saving): |
76 | 75 |
|
77 |
-.. execute:: |
|
76 |
+.. jupyter-execute:: |
|
78 | 77 |
|
79 | 78 |
control = adaptive.Learner1D(f, bounds=(-1, 1)) |
80 | 79 |
control.copy_from(learner) |
... | ... |
@@ -82,7 +81,7 @@ Or just (without saving): |
82 | 81 |
One can also periodically save the learner while running in a |
83 | 82 |
`~adaptive.Runner`. Use it like: |
84 | 83 |
|
85 |
-.. execute:: |
|
84 |
+.. jupyter-execute:: |
|
86 | 85 |
|
87 | 86 |
def slow_f(x): |
88 | 87 |
from time import sleep |
... | ... |
@@ -93,17 +92,17 @@ One can also periodically save the learner while running in a |
93 | 92 |
runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100) |
94 | 93 |
runner.start_periodic_saving(save_kwargs=dict(fname='data/periodic_example.p'), interval=6) |
95 | 94 |
|
96 |
-.. execute:: |
|
95 |
+.. jupyter-execute:: |
|
97 | 96 |
:hide-code: |
98 | 97 |
|
99 | 98 |
await asyncio.sleep(6) # This is not needed in a notebook environment! |
100 | 99 |
runner.cancel() |
101 | 100 |
|
102 |
-.. execute:: |
|
101 |
+.. jupyter-execute:: |
|
103 | 102 |
|
104 | 103 |
runner.live_info() # we cancelled it after 6 seconds |
105 | 104 |
|
106 |
-.. execute:: |
|
105 |
+.. jupyter-execute:: |
|
107 | 106 |
|
108 | 107 |
# See the data 6 seconds later with |
109 | 108 |
!ls -lah data # only works on macOS and Linux systems |
... | ... |
@@ -137,7 +136,7 @@ something until its done? |
137 | 136 |
The simplest way to accomplish this is to use |
138 | 137 |
`adaptive.BlockingRunner`: |
139 | 138 |
|
140 |
-.. execute:: |
|
139 |
+.. jupyter-execute:: |
|
141 | 140 |
|
142 | 141 |
learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
143 | 142 |
adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) |
... | ... |
@@ -164,7 +163,7 @@ way with adaptive. |
164 | 163 |
The simplest way is to use `adaptive.runner.simple` to run your |
165 | 164 |
learner: |
166 | 165 |
|
167 |
-.. execute:: |
|
166 |
+.. jupyter-execute:: |
|
168 | 167 |
|
169 | 168 |
learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
170 | 169 |
|
... | ... |
@@ -180,7 +179,7 @@ If you want to enable determinism, want to continue using the |
180 | 179 |
non-blocking `adaptive.Runner`, you can use the |
181 | 180 |
`adaptive.runner.SequentialExecutor`: |
182 | 181 |
|
183 |
-.. execute:: |
|
182 |
+.. jupyter-execute:: |
|
184 | 183 |
|
185 | 184 |
from adaptive.runner import SequentialExecutor |
186 | 185 |
|
... | ... |
@@ -188,16 +187,16 @@ non-blocking `adaptive.Runner`, you can use the |
188 | 187 |
|
189 | 188 |
runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.01) |
190 | 189 |
|
191 |
-.. execute:: |
|
190 |
+.. jupyter-execute:: |
|
192 | 191 |
:hide-code: |
193 | 192 |
|
194 | 193 |
await runner.task # This is not needed in a notebook environment! |
195 | 194 |
|
196 |
-.. execute:: |
|
195 |
+.. jupyter-execute:: |
|
197 | 196 |
|
198 | 197 |
runner.live_info() |
199 | 198 |
|
200 |
-.. execute:: |
|
199 |
+.. jupyter-execute:: |
|
201 | 200 |
|
202 | 201 |
runner.live_plot(update_interval=0.1) |
203 | 202 |
|
... | ... |
@@ -215,29 +214,29 @@ cancelled. |
215 | 214 |
the runner. You can also stop the runner programmatically using |
216 | 215 |
``runner.cancel()``. |
217 | 216 |
|
218 |
-.. execute:: |
|
217 |
+.. jupyter-execute:: |
|
219 | 218 |
|
220 | 219 |
learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
221 | 220 |
runner = adaptive.Runner(learner) |
222 | 221 |
|
223 |
-.. execute:: |
|
222 |
+.. jupyter-execute:: |
|
224 | 223 |
:hide-code: |
225 | 224 |
|
226 | 225 |
await asyncio.sleep(0.1) # This is not needed in the notebook! |
227 | 226 |
|
228 |
-.. execute:: |
|
227 |
+.. jupyter-execute:: |
|
229 | 228 |
|
230 | 229 |
runner.cancel() # Let's execute this after 0.1 seconds |
231 | 230 |
|
232 |
-.. execute:: |
|
231 |
+.. jupyter-execute:: |
|
233 | 232 |
|
234 | 233 |
runner.live_info() |
235 | 234 |
|
236 |
-.. execute:: |
|
235 |
+.. jupyter-execute:: |
|
237 | 236 |
|
238 | 237 |
runner.live_plot(update_interval=0.1) |
239 | 238 |
|
240 |
-.. execute:: |
|
239 |
+.. jupyter-execute:: |
|
241 | 240 |
|
242 | 241 |
print(runner.status()) |
243 | 242 |
|
... | ... |
@@ -253,7 +252,7 @@ gone wrong is that nothing will be happening. |
253 | 252 |
Let’s look at the following example, where the function to be learned |
254 | 253 |
will raise an exception 10% of the time. |
255 | 254 |
|
256 |
-.. execute:: |
|
255 |
+.. jupyter-execute:: |
|
257 | 256 |
|
258 | 257 |
def will_raise(x): |
259 | 258 |
from random import random |
... | ... |
@@ -268,16 +267,16 @@ will raise an exception 10% of the time. |
268 | 267 |
runner = adaptive.Runner(learner) # without 'goal' the runner will run forever unless cancelled |
269 | 268 |
|
270 | 269 |
|
271 |
-.. execute:: |
|
270 |
+.. jupyter-execute:: |
|
272 | 271 |
:hide-code: |
273 | 272 |
|
274 | 273 |
await asyncio.sleep(4) # in 4 seconds it will surely have failed |
275 | 274 |
|
276 |
-.. execute:: |
|
275 |
+.. jupyter-execute:: |
|
277 | 276 |
|
278 | 277 |
runner.live_info() |
279 | 278 |
|
280 |
-.. execute:: |
|
279 |
+.. jupyter-execute:: |
|
281 | 280 |
|
282 | 281 |
runner.live_plot() |
283 | 282 |
|
... | ... |
@@ -286,7 +285,7 @@ after a few points are evaluated. |
286 | 285 |
|
287 | 286 |
First we should check that the runner has really finished: |
288 | 287 |
|
289 |
-.. execute:: |
|
288 |
+.. jupyter-execute:: |
|
290 | 289 |
|
291 | 290 |
runner.task.done() |
292 | 291 |
|
... | ... |
@@ -295,7 +294,7 @@ runner. This should be ``None`` if the runner stopped successfully. If |
295 | 294 |
the runner stopped due to an exception then asking for the result will |
296 | 295 |
raise the exception with the stack trace: |
297 | 296 |
|
298 |
-.. execute:: |
|
297 |
+.. jupyter-execute:: |
|
299 | 298 |
|
300 | 299 |
runner.task.result() |
301 | 300 |
|
... | ... |
@@ -306,18 +305,18 @@ Runners do their job in the background, which makes introspection quite |
306 | 305 |
cumbersome. One way to inspect runners is to instantiate one with |
307 | 306 |
``log=True``: |
308 | 307 |
|
309 |
-.. execute:: |
|
308 |
+.. jupyter-execute:: |
|
310 | 309 |
|
311 | 310 |
learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
312 |
- runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1, |
|
311 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01, |
|
313 | 312 |
log=True) |
314 | 313 |
|
315 |
-.. execute:: |
|
314 |
+.. jupyter-execute:: |
|
316 | 315 |
:hide-code: |
317 | 316 |
|
318 | 317 |
await runner.task # This is not needed in a notebook environment! |
319 | 318 |
|
320 |
-.. execute:: |
|
319 |
+.. jupyter-execute:: |
|
321 | 320 |
|
322 | 321 |
runner.live_info() |
323 | 322 |
|
... | ... |
@@ -329,12 +328,12 @@ non-deterministic order. |
329 | 328 |
This can be used with `adaptive.runner.replay_log` to perform the same |
330 | 329 |
set of operations on another runner: |
331 | 330 |
|
332 |
-.. execute:: |
|
331 |
+.. jupyter-execute:: |
|
333 | 332 |
|
334 | 333 |
reconstructed_learner = adaptive.Learner1D(f, bounds=learner.bounds) |
335 | 334 |
adaptive.runner.replay_log(reconstructed_learner, runner.log) |
336 | 335 |
|
337 |
-.. execute:: |
|
336 |
+.. jupyter-execute:: |
|
338 | 337 |
|
339 | 338 |
learner.plot().Scatter.I.opts(style=dict(size=6)) * reconstructed_learner.plot() |
340 | 339 |
|
... | ... |
@@ -356,7 +355,7 @@ not do anything when the kernel is blocked. |
356 | 355 |
Therefore you need to create an ``async`` function and hook it into the |
357 | 356 |
``ioloop`` like so: |
358 | 357 |
|
359 |
-.. execute:: |
|
358 |
+.. jupyter-execute:: |
|
360 | 359 |
|
361 | 360 |
import asyncio |
362 | 361 |
|
... | ... |
@@ -373,12 +372,12 @@ Therefore you need to create an ``async`` function and hook it into the |
373 | 372 |
|
374 | 373 |
timer = ioloop.create_task(time(runner)) |
375 | 374 |
|
376 |
-.. execute:: |
|
375 |
+.. jupyter-execute:: |
|
377 | 376 |
:hide-code: |
378 | 377 |
|
379 | 378 |
await runner.task # This is not needed in a notebook environment! |
380 | 379 |
|
381 |
-.. execute:: |
|
380 |
+.. jupyter-execute:: |
|
382 | 381 |
|
383 | 382 |
# The result will only be set when the runner is done. |
384 | 383 |
timer.result() |
... | ... |
@@ -17,21 +17,98 @@ Advanced Topics |
17 | 17 |
import adaptive |
18 | 18 |
adaptive.notebook_extension() |
19 | 19 |
|
20 |
+ import asyncio |
|
20 | 21 |
from functools import partial |
21 | 22 |
import random |
22 | 23 |
|
23 | 24 |
offset = random.uniform(-0.5, 0.5) |
24 | 25 |
|
25 |
- def f(x, offset=offset, wait=True): |
|
26 |
- from time import sleep |
|
27 |
- from random import random |
|
28 |
- |
|
26 |
+ def f(x, offset=offset): |
|
29 | 27 |
a = 0.01 |
30 |
- if wait: |
|
31 |
- sleep(random()) |
|
32 | 28 |
return x + a**2 / (a**2 + (x - offset)**2) |
33 | 29 |
|
34 | 30 |
|
31 |
+Saving and loading learners |
|
32 |
+--------------------------- |
|
33 |
+ |
|
34 |
+Every learner has a `~adaptive.BaseLearner.save` and `~adaptive.BaseLearner.load` |
|
35 |
+method that can be used to save and load **only** the data of a learner. |
|
36 |
+ |
|
37 |
+There are **two ways** of naming the files: 1. Using the ``fname`` |
|
38 |
+argument in ``learner.save(fname=...)`` 2. Setting the ``fname`` |
|
39 |
+attribute, like ``learner.fname = 'data/example.p'`` and then |
|
40 |
+``learner.save()`` |
|
41 |
+ |
|
42 |
+The second way *must be used* when saving the ``learner``\s of a |
|
43 |
+`~adaptive.BalancingLearner`. |
|
44 |
+ |
|
45 |
+By default the resulting pickle files are compressed, to turn this off |
|
46 |
+use ``learner.save(fname=..., compress=False)`` |
|
47 |
+ |
|
48 |
+.. execute:: |
|
49 |
+ |
|
50 |
+ # Let's create two learners and run only one. |
|
51 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
52 |
+ control = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
53 |
+ |
|
54 |
+ # Let's only run the learner |
|
55 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) |
|
56 |
+ |
|
57 |
+.. execute:: |
|
58 |
+ :hide-code: |
|
59 |
+ |
|
60 |
+ await runner.task # This is not needed in a notebook environment! |
|
61 |
+ |
|
62 |
+.. execute:: |
|
63 |
+ |
|
64 |
+ runner.live_info() |
|
65 |
+ |
|
66 |
+.. execute:: |
|
67 |
+ |
|
68 |
+ fname = 'data/example_file.p' |
|
69 |
+ learner.save(fname) |
|
70 |
+ control.load(fname) |
|
71 |
+ |
|
72 |
+ (learner.plot().relabel('saved learner') |
|
73 |
+ + control.plot().relabel('loaded learner')) |
|
74 |
+ |
|
75 |
+Or just (without saving): |
|
76 |
+ |
|
77 |
+.. execute:: |
|
78 |
+ |
|
79 |
+ control = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
80 |
+ control.copy_from(learner) |
|
81 |
+ |
|
82 |
+One can also periodically save the learner while running in a |
|
83 |
+`~adaptive.Runner`. Use it like: |
|
84 |
+ |
|
85 |
+.. execute:: |
|
86 |
+ |
|
87 |
+ def slow_f(x): |
|
88 |
+ from time import sleep |
|
89 |
+ sleep(5) |
|
90 |
+ return x |
|
91 |
+ |
|
92 |
+ learner = adaptive.Learner1D(slow_f, bounds=[0, 1]) |
|
93 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.npoints > 100) |
|
94 |
+ runner.start_periodic_saving(save_kwargs=dict(fname='data/periodic_example.p'), interval=6) |
|
95 |
+ |
|
96 |
+.. execute:: |
|
97 |
+ :hide-code: |
|
98 |
+ |
|
99 |
+ await asyncio.sleep(6) # This is not needed in a notebook environment! |
|
100 |
+ runner.cancel() |
|
101 |
+ |
|
102 |
+.. execute:: |
|
103 |
+ |
|
104 |
+ runner.live_info() # we cancelled it after 6 seconds |
|
105 |
+ |
|
106 |
+.. execute:: |
|
107 |
+ |
|
108 |
+ # See the data 6 seconds later with |
|
109 |
+ !ls -lah data # only works on macOS and Linux systems |
|
110 |
+ |
|
111 |
+ |
|
35 | 112 |
A watched pot never boils! |
36 | 113 |
-------------------------- |
37 | 114 |
|
... | ... |
@@ -62,7 +139,7 @@ The simplest way to accomplish this is to use |
62 | 139 |
|
63 | 140 |
.. execute:: |
64 | 141 |
|
65 |
- learner = adaptive.Learner1D(partial(f, wait=False), bounds=(-1, 1)) |
|
142 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
66 | 143 |
adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) |
67 | 144 |
# This will only get run after the runner has finished |
68 | 145 |
learner.plot() |
... | ... |
@@ -89,7 +166,7 @@ learner: |
89 | 166 |
|
90 | 167 |
.. execute:: |
91 | 168 |
|
92 |
- learner = adaptive.Learner1D(partial(f, wait=False), bounds=(-1, 1)) |
|
169 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
93 | 170 |
|
94 | 171 |
# blocks until completion |
95 | 172 |
adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01) |
... | ... |
@@ -107,7 +184,7 @@ non-blocking `adaptive.Runner`, you can use the |
107 | 184 |
|
108 | 185 |
from adaptive.runner import SequentialExecutor |
109 | 186 |
|
110 |
- learner = adaptive.Learner1D(partial(f, wait=False), bounds=(-1, 1)) |
|
187 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
111 | 188 |
|
112 | 189 |
runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.01) |
113 | 190 |
|
... | ... |
@@ -146,12 +223,11 @@ the runner. You can also stop the runner programatically using |
146 | 223 |
.. execute:: |
147 | 224 |
:hide-code: |
148 | 225 |
|
149 |
- import asyncio |
|
150 |
- await asyncio.sleep(3) # This is not needed in the notebook! |
|
226 |
+ await asyncio.sleep(0.1) # This is not needed in the notebook! |
|
151 | 227 |
|
152 | 228 |
.. execute:: |
153 | 229 |
|
154 |
- runner.cancel() # Let's execute this after 3 seconds |
|
230 |
+ runner.cancel() # Let's execute this after 0.1 seconds |
|
155 | 231 |
|
156 | 232 |
.. execute:: |
157 | 233 |
|
... | ... |
@@ -195,7 +271,6 @@ will raise an exception 10% of the time. |
195 | 271 |
.. execute:: |
196 | 272 |
:hide-code: |
197 | 273 |
|
198 |
- import asyncio |
|
199 | 274 |
await asyncio.sleep(4) # in 4 seconds it will surely have failed |
200 | 275 |
|
201 | 276 |
.. execute:: |
1 | 1 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,340 @@ |
1 |
+Advanced Topics |
|
2 |
+=============== |
|
3 |
+ |
|
4 |
+.. note:: |
|
5 |
+ Because this documentation consists of static html, the ``live_plot`` |
|
6 |
+ and ``live_info`` widget is not live. Download the notebook |
|
7 |
+ in order to see the real behaviour. |
|
8 |
+ |
|
9 |
+.. seealso:: |
|
10 |
+ The complete source code of this tutorial can be found in |
|
11 |
+ :jupyter-download:notebook:`advanced-topics` |
|
12 |
+ |
|
13 |
+.. execute:: |
|
14 |
+ :hide-code: |
|
15 |
+ :new-notebook: advanced-topics |
|
16 |
+ |
|
17 |
+ import adaptive |
|
18 |
+ adaptive.notebook_extension() |
|
19 |
+ |
|
20 |
+ from functools import partial |
|
21 |
+ import random |
|
22 |
+ |
|
23 |
+ offset = random.uniform(-0.5, 0.5) |
|
24 |
+ |
|
25 |
+ def f(x, offset=offset, wait=True): |
|
26 |
+ from time import sleep |
|
27 |
+ from random import random |
|
28 |
+ |
|
29 |
+ a = 0.01 |
|
30 |
+ if wait: |
|
31 |
+ sleep(random()) |
|
32 |
+ return x + a**2 / (a**2 + (x - offset)**2) |
|
33 |
+ |
|
34 |
+ |
|
35 |
+A watched pot never boils! |
|
36 |
+-------------------------- |
|
37 |
+ |
|
38 |
+`adaptive.Runner` does its work in an `asyncio` task that runs |
|
39 |
+concurrently with the IPython kernel, when using ``adaptive`` from a |
|
40 |
+Jupyter notebook. This is advantageous because it allows us to do things |
|
41 |
+like live-updating plots, however it can trip you up if you’re not |
|
42 |
+careful. |
|
43 |
+ |
|
44 |
+Notably: **if you block the IPython kernel, the runner will not do any |
|
45 |
+work**. |
|
46 |
+ |
|
47 |
+For example if you wanted to wait for a runner to complete, **do not |
|
48 |
+wait in a busy loop**: |
|
49 |
+ |
|
50 |
+.. code:: python |
|
51 |
+ |
|
52 |
+ while not runner.task.done(): |
|
53 |
+ pass |
|
54 |
+ |
|
55 |
+If you do this then **the runner will never finish**. |
|
56 |
+ |
|
57 |
+What to do if you don’t care about live plotting, and just want to run |
|
58 |
+something until it's done? |
|
59 |
+ |
|
60 |
+The simplest way to accomplish this is to use |
|
61 |
+`adaptive.BlockingRunner`: |
|
62 |
+ |
|
63 |
+.. execute:: |
|
64 |
+ |
|
65 |
+ learner = adaptive.Learner1D(partial(f, wait=False), bounds=(-1, 1)) |
|
66 |
+ adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01) |
|
67 |
+ # This will only get run after the runner has finished |
|
68 |
+ learner.plot() |
|
69 |
+ |
|
70 |
+Reproducibility |
|
71 |
+--------------- |
|
72 |
+ |
|
73 |
+By default ``adaptive`` runners evaluate the learned function in |
|
74 |
+parallel across several cores. The runners are also opportunistic, in |
|
75 |
+that as soon as a result is available they will feed it to the learner |
|
76 |
+and request another point to replace the one that just finished. |
|
77 |
+ |
|
78 |
+Because the order in which computations complete is non-deterministic, |
|
79 |
+this means that the runner behaves in a non-deterministic way. Adaptive |
|
80 |
+makes this choice because in many cases the speedup from parallel |
|
81 |
+execution is worth sacrificing the “purity” of exactly reproducible |
|
82 |
+computations. |
|
83 |
+ |
|
84 |
+Nevertheless it is still possible to run a learner in a deterministic |
|
85 |
+way with adaptive. |
|
86 |
+ |
|
87 |
+The simplest way is to use `adaptive.runner.simple` to run your |
|
88 |
+learner: |
|
89 |
+ |
|
90 |
+.. execute:: |
|
91 |
+ |
|
92 |
+ learner = adaptive.Learner1D(partial(f, wait=False), bounds=(-1, 1)) |
|
93 |
+ |
|
94 |
+ # blocks until completion |
|
95 |
+ adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01) |
|
96 |
+ |
|
97 |
+ learner.plot() |
|
98 |
+ |
|
99 |
+Note that unlike `adaptive.Runner`, `adaptive.runner.simple` |
|
100 |
+*blocks* until it is finished. |
|
101 |
+ |
|
102 |
+If you want to enable determinism, but want to continue using the |
|
103 |
+non-blocking `adaptive.Runner`, you can use the |
|
104 |
+`adaptive.runner.SequentialExecutor`: |
|
105 |
+ |
|
106 |
+.. execute:: |
|
107 |
+ |
|
108 |
+ from adaptive.runner import SequentialExecutor |
|
109 |
+ |
|
110 |
+ learner = adaptive.Learner1D(partial(f, wait=False), bounds=(-1, 1)) |
|
111 |
+ |
|
112 |
+ runner = adaptive.Runner(learner, executor=SequentialExecutor(), goal=lambda l: l.loss() < 0.01) |
|
113 |
+ |
|
114 |
+.. execute:: |
|
115 |
+ :hide-code: |
|
116 |
+ |
|
117 |
+ await runner.task # This is not needed in a notebook environment! |
|
118 |
+ |
|
119 |
+.. execute:: |
|
120 |
+ |
|
121 |
+ runner.live_info() |
|
122 |
+ |
|
123 |
+.. execute:: |
|
124 |
+ |
|
125 |
+ runner.live_plot(update_interval=0.1) |
|
126 |
+ |
|
127 |
+Cancelling a runner |
|
128 |
+------------------- |
|
129 |
+ |
|
130 |
+Sometimes you want to interactively explore a parameter space, and want |
|
131 |
+the function to be evaluated at finer and finer resolution and manually |
|
132 |
+control when the calculation stops. |
|
133 |
+ |
|
134 |
+If no ``goal`` is provided to a runner then the runner will run until |
|
135 |
+cancelled. |
|
136 |
+ |
|
137 |
+``runner.live_info()`` will provide a button that can be clicked to stop |
|
138 |
+the runner. You can also stop the runner programmatically using |
|
139 |
+``runner.cancel()``. |
|
140 |
+ |
|
141 |
+.. execute:: |
|
142 |
+ |
|
143 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
144 |
+ runner = adaptive.Runner(learner) |
|
145 |
+ |
|
146 |
+.. execute:: |
|
147 |
+ :hide-code: |
|
148 |
+ |
|
149 |
+ import asyncio |
|
150 |
+ await asyncio.sleep(3) # This is not needed in the notebook! |
|
151 |
+ |
|
152 |
+.. execute:: |
|
153 |
+ |
|
154 |
+ runner.cancel() # Let's execute this after 3 seconds |
|
155 |
+ |
|
156 |
+.. execute:: |
|
157 |
+ |
|
158 |
+ runner.live_info() |
|
159 |
+ |
|
160 |
+.. execute:: |
|
161 |
+ |
|
162 |
+ runner.live_plot(update_interval=0.1) |
|
163 |
+ |
|
164 |
+.. execute:: |
|
165 |
+ |
|
166 |
+ print(runner.status()) |
|
167 |
+ |
|
168 |
+Debugging Problems |
|
169 |
+------------------ |
|
170 |
+ |
|
171 |
+Runners work in the background with respect to the IPython kernel, which |
|
172 |
+makes it convenient, but also means that inspecting errors is more |
|
173 |
+difficult because exceptions will not be raised directly in the |
|
174 |
+notebook. Often the only indication you will have that something has |
|
175 |
+gone wrong is that nothing will be happening. |
|
176 |
+ |
|
177 |
+Let’s look at the following example, where the function to be learned |
|
178 |
+will raise an exception 10% of the time. |
|
179 |
+ |
|
180 |
+.. execute:: |
|
181 |
+ |
|
182 |
+ def will_raise(x): |
|
183 |
+ from random import random |
|
184 |
+ from time import sleep |
|
185 |
+ |
|
186 |
+ sleep(random()) |
|
187 |
+ if random() < 0.1: |
|
188 |
+ raise RuntimeError('something went wrong!') |
|
189 |
+ return x**2 |
|
190 |
+ |
|
191 |
+ learner = adaptive.Learner1D(will_raise, (-1, 1)) |
|
192 |
+ runner = adaptive.Runner(learner) # without 'goal' the runner will run forever unless cancelled |
|
193 |
+ |
|
194 |
+ |
|
195 |
+.. execute:: |
|
196 |
+ :hide-code: |
|
197 |
+ |
|
198 |
+ import asyncio |
|
199 |
+ await asyncio.sleep(4) # in 4 seconds it will surely have failed |
|
200 |
+ |
|
201 |
+.. execute:: |
|
202 |
+ |
|
203 |
+ runner.live_info() |
|
204 |
+ |
|
205 |
+.. execute:: |
|
206 |
+ |
|
207 |
+ runner.live_plot() |
|
208 |
+ |
|
209 |
+The above runner should continue forever, but we notice that it stops |
|
210 |
+after a few points are evaluated. |
|
211 |
+ |
|
212 |
+First we should check that the runner has really finished: |
|
213 |
+ |
|
214 |
+.. execute:: |
|
215 |
+ |
|
216 |
+ runner.task.done() |
|
217 |
+ |
|
218 |
+If it has indeed finished then we should check the ``result`` of the |
|
219 |
+runner. This should be ``None`` if the runner stopped successfully. If |
|
220 |
+the runner stopped due to an exception then asking for the result will |
|
221 |
+raise the exception with the stack trace: |
|
222 |
+ |
|
223 |
+.. execute:: |
|
224 |
+ |
|
225 |
+ runner.task.result() |
|
226 |
+ |
|
227 |
+Logging runners |
|
228 |
+~~~~~~~~~~~~~~~ |
|
229 |
+ |
|
230 |
+Runners do their job in the background, which makes introspection quite |
|
231 |
+cumbersome. One way to inspect runners is to instantiate one with |
|
232 |
+``log=True``: |
|
233 |
+ |
|
234 |
+.. execute:: |
|
235 |
+ |
|
236 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
237 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1, |
|
238 |
+ log=True) |
|
239 |
+ |
|
240 |
+.. execute:: |
|
241 |
+ :hide-code: |
|
242 |
+ |
|
243 |
+ await runner.task # This is not needed in a notebook environment! |
|
244 |
+ |
|
245 |
+.. execute:: |
|
246 |
+ |
|
247 |
+ runner.live_info() |
|
248 |
+ |
|
249 |
+This gives the runner a ``log`` attribute, which is a list of the |
|
250 |
+``learner`` methods that were called, as well as their arguments. This |
|
251 |
+is useful because executors typically execute their tasks in a |
|
252 |
+non-deterministic order. |
|
253 |
+ |
|
254 |
+This can be used with `adaptive.runner.replay_log` to perform the same |
|
255 |
+set of operations on another runner: |
|
256 |
+ |
|
257 |
+.. execute:: |
|
258 |
+ |
|
259 |
+ reconstructed_learner = adaptive.Learner1D(f, bounds=learner.bounds) |
|
260 |
+ adaptive.runner.replay_log(reconstructed_learner, runner.log) |
|
261 |
+ |
|
262 |
+.. execute:: |
|
263 |
+ |
|
264 |
+ learner.plot().Scatter.I.opts(style=dict(size=6)) * reconstructed_learner.plot() |
|
265 |
+ |
|
266 |
+Timing functions |
|
267 |
+~~~~~~~~~~~~~~~~ |
|
268 |
+ |
|
269 |
+To time the runner you **cannot** simply use |
|
270 |
+ |
|
271 |
+.. code:: python |
|
272 |
+ |
|
273 |
+ now = datetime.now() |
|
274 |
+ runner = adaptive.Runner(...) |
|
275 |
+ print(datetime.now() - now) |
|
276 |
+ |
|
277 |
+because this will be done immediately. Also blocking the kernel with |
|
278 |
+``while not runner.task.done()`` will not work because the runner will |
|
279 |
+not do anything when the kernel is blocked. |
|
280 |
+ |
|
281 |
+Therefore you need to create an ``async`` function and hook it into the |
|
282 |
+``ioloop`` like so: |
|
283 |
+ |
|
284 |
+.. execute:: |
|
285 |
+ |
|
286 |
+ import asyncio |
|
287 |
+ |
|
288 |
+ async def time(runner): |
|
289 |
+ from datetime import datetime |
|
290 |
+ now = datetime.now() |
|
291 |
+ await runner.task |
|
292 |
+ return datetime.now() - now |
|
293 |
+ |
|
294 |
+ ioloop = asyncio.get_event_loop() |
|
295 |
+ |
|
296 |
+ learner = adaptive.Learner1D(f, bounds=(-1, 1)) |
|
297 |
+ runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01) |
|
298 |
+ |
|
299 |
+ timer = ioloop.create_task(time(runner)) |
|
300 |
+ |
|
301 |
+.. execute:: |
|
302 |
+ :hide-code: |
|
303 |
+ |
|
304 |
+ await runner.task # This is not needed in a notebook environment! |
|
305 |
+ |
|
306 |
+.. execute:: |
|
307 |
+ |
|
308 |
+ # The result will only be set when the runner is done. |
|
309 |
+ timer.result() |
|
310 |
+ |
|
311 |
+Using Runners from a script |
|
312 |
+--------------------------- |
|
313 |
+ |
|
314 |
+Runners can also be used from a Python script independently of the |
|
315 |
+notebook. |
|
316 |
+ |
|
317 |
+The simplest way to accomplish this is simply to use the |
|
318 |
+`~adaptive.BlockingRunner`: |
|
319 |
+ |
|
320 |
+.. code:: python |
|
321 |
+ |
|
322 |
+ import adaptive |
|
323 |
+ |
|
324 |
+ def f(x): |
|
325 |
+ return x |
|
326 |
+ |
|
327 |
+ learner = adaptive.Learner1D(f, (-1, 1)) |
|
328 |
+ |
|
329 |
+ adaptive.BlockingRunner(learner, goal=lambda: l: l.loss() < 0.1) |
|
330 |
+ |
|
331 |
+If you use `asyncio` already in your script and want to integrate |
|
332 |
+``adaptive`` into it, then you can use the default `~adaptive.Runner` as you |
|
333 |
+would from a notebook. If you want to wait for the runner to finish, |
|
334 |
+then you can simply |
|
335 |
+ |
|
336 |
+.. code:: python |
|
337 |
+ |
|
338 |
+ await runner.task |
|
339 |
+ |
|
340 |
+from within a coroutine. |