@@ -17,6 +17,8 @@ class Runner:
     goal : callable, optional
         The end condition for the calculation. This function must take the
         learner as its sole argument, and return True if we should stop.
+    log : bool, default: False
+        If True, record the method calls made to the learner by this runner
     ioloop : asyncio.AbstractEventLoop, optional
         The ioloop in which to run the learning algorithm. If not provided,
         the default event loop is used.
@@ -27,12 +29,17 @@ class Runner:
         The underlying task. May be cancelled to stop the runner.
     learner : Learner
         The underlying learner. May be queried for its state
+    log : list or None
+        Record of the method calls made to the learner, in the format
+        '(method_name, *args)'.
     """
 
-    def __init__(self, learner, executor=None, goal=None, *, ioloop=None):
+    def __init__(self, learner, executor=None, goal=None, *,
+                 log=False, ioloop=None):
         self.ioloop = ioloop if ioloop else asyncio.get_event_loop()
         self.executor = _ensure_async_executor(executor, self.ioloop)
         self.learner = learner
+        self.log = [] if log else None
 
         if goal is None:
             def goal(_):
@@ -50,6 +57,7 @@ class Runner:
         first_completed = asyncio.FIRST_COMPLETED
         xs = dict()
         done = [None] * _get_executor_ncores(self.executor)
+        do_log = self.log is not None
 
         if len(done) == 0:
             raise RuntimeError('Executor has no workers')
@@ -58,6 +66,8 @@ class Runner:
             while not self.goal(self.learner):
                 # Launch tasks to replace the ones that completed
                 # on the last iteration.
+                if do_log:
+                    self.log.append(('choose_points', len(done)))
                 for x in self.learner.choose_points(len(done)):
                     xs[self.executor.submit(self.learner.function, x)] = x
 
@@ -69,6 +79,8 @@ class Runner:
                 for fut in done:
                     x = xs.pop(fut)
                     y = await fut
+                    if do_log:
+                        self.log.append(('add_point', x, y))
                     self.learner.add_point(x, y)
         finally:
             # cancel any outstanding tasks
@@ -77,6 +89,21 @@ class Runner:
                 raise RuntimeError('Some futures remain uncancelled')
 
 
+def replay_log(learner, log):
+    """Apply a sequence of method calls to a learner.
+
+    This is useful for debugging runners.
+
+    Parameters
+    ----------
+    learner : learner.BaseLearner
+    log : list
+        contains tuples: '(method_name, *args)'.
+    """
+    for method, *args in log:
+        getattr(learner, method)(*args)
+
+
 # Internal functionality
 
 class _AsyncExecutor:
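
A minimal usage sketch of the new logging hooks above (not part of the diff itself; it assumes a `Learner1D` and a target function `f`, as in the notebook cells that follow):

    import adaptive

    def f(x):
        return x ** 2  # placeholder target function (an assumption, not from the patch)

    learner = adaptive.learner.Learner1D(f, bounds=(-1, 1))
    # In a notebook the runner executes in the background event loop.
    runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1, log=True)

    # Once the goal is reached, runner.log is a list of tuples such as
    # ('choose_points', n_points) and ('add_point', x, y), in the order in
    # which the runner called the learner.

    # replay_log applies the same calls, in the same order, to a fresh learner:
    reconstructed_learner = adaptive.learner.Learner1D(f, bounds=(-1, 1))
    adaptive.runner.replay_log(reconstructed_learner, runner.log)
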
@@ -421,6 +421,60 @@
     "runner.task.result()"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Logging runners"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Runners do their job in the background, which makes introspection quite cumbersome. One way to inspect runners is to instantiate one with `log=True`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner = adaptive.learner.Learner1D(f, bounds=(-1, 1))\n",
+    "runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.1,\n",
+    "                         log=True)\n",
+    "adaptive.live_plot(runner)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This gives the runner a `log` attribute, which is a list of the `learner` methods that were called, as well as their arguments. This is useful because executors typically execute their tasks in a non-deterministic order, so the log captures the exact order in which the learner was updated.\n",
+    "\n",
+    "This can be used with `adaptive.runner.replay_log` to perform the same set of operations on another runner:\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "reconstructed_learner = adaptive.learner.Learner1D(f, bounds=(-1, 1))\n",
+    "adaptive.runner.replay_log(reconstructed_learner, runner.log)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner.plot().opts(style=dict(size=6)) * reconstructed_learner.plot()"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},