
remove _inline_js=False in adaptive.notebook_extension call

Bas Nijholt authored on 17/09/2020 22:19:56
@@ -48,7 +48,7 @@ on the *Play* :fa:`play` button or move the sliders.
     from adaptive.learner.learner1D import uniform_loss, default_loss
     import holoviews as hv
     import numpy as np
-    adaptive.notebook_extension(_inline_js=False)
+    adaptive.notebook_extension()
     %output holomap='scrubber'
 
 `adaptive.Learner1D`
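
For context, ``adaptive.notebook_extension()`` is the Jupyter setup call that this commit and the 2019 commit below toggle; per those commit messages, the private ``_inline_js=False`` keyword told it not to embed the HoloViews JavaScript directly in the page. A minimal sketch of the setup cell, using the default call this commit restores:

    import adaptive

    # Sets up HoloViews-based plotting inside Jupyter; the underscore in
    # _inline_js marks it as private API, which is why it was removed here.
    adaptive.notebook_extension()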

float -> int

Bas Nijholt authored on 08/04/2020 21:38:52
@@ -99,7 +99,7 @@ on the *Play* :fa:`play` button or move the sliders.
     def plot(learner, npoints):
         adaptive.runner.simple(learner, lambda l: l.npoints == npoints)
         learner2 = adaptive.Learner2D(ring, bounds=learner.bounds)
-        xs = ys = np.linspace(*learner.bounds[0], learner.npoints**0.5)
+        xs = ys = np.linspace(*learner.bounds[0], int(learner.npoints**0.5))
         xys = list(itertools.product(xs, ys))
         learner2.tell_many(xys, map(ring, xys))
         return (learner2.plot().relabel('homogeneous grid')
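
This cast matters because the third positional argument of ``np.linspace`` is ``num``, the number of samples, which must be an integer: ``learner.npoints**0.5`` is always a float (``100**0.5 == 10.0``), and NumPy 1.18+ raises a ``TypeError`` for a non-integer ``num`` where older releases merely truncated with a deprecation warning. A minimal sketch, with illustrative values:

    import numpy as np

    npoints = 100
    # np.linspace(-1, 1, npoints**0.5) fails on modern NumPy because
    # npoints**0.5 == 10.0 is a float; casting to int restores it.
    xs = np.linspace(-1, 1, int(npoints**0.5))  # 10 evenly spaced points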

do not inline the HoloViews JS

Bas Nijholt authored on 26/03/2019 12:44:31
@@ -48,7 +48,7 @@ on the *Play* :fa:`play` button or move the sliders.
     from adaptive.learner.learner1D import uniform_loss, default_loss
     import holoviews as hv
     import numpy as np
-    adaptive.notebook_extension()
+    adaptive.notebook_extension(_inline_js=False)
     %output holomap='scrubber'
 
 `adaptive.Learner1D`

documentation improvements

Bas Nijholt authored on 19/10/2018 14:19:42
new file mode 100644
@@ -0,0 +1,162 @@
+Implemented algorithms
+----------------------
+
+The core concept in ``adaptive`` is that of a *learner*. A *learner*
+samples a function at the best places in its parameter space to get
+maximum “information” about the function. As it evaluates the function
+at more and more points in the parameter space, it gets a better idea of
+where the best places are to sample next.
+
+Of course, what qualifies as the “best places” will depend on your
+application domain! ``adaptive`` makes some reasonable default choices,
+but the details of the adaptive sampling are completely customizable.
+
+The following learners are implemented:
+
+- `~adaptive.Learner1D`, for 1D functions ``f: ℝ → ℝ^N``,
+- `~adaptive.Learner2D`, for 2D functions ``f: ℝ^2 → ℝ^N``,
+- `~adaptive.LearnerND`, for ND functions ``f: ℝ^N → ℝ^M``,
+- `~adaptive.AverageLearner`, for stochastic functions where you want to
+  average the result over many evaluations,
+- `~adaptive.IntegratorLearner`, for
+  when you want to integrate a 1D function ``f: ℝ → ℝ``.
+
+Meta-learners (to be used with other learners):
+
+- `~adaptive.BalancingLearner`, for when you want to run several learners at once,
+  selecting the “best” one each time you get more points,
+- `~adaptive.DataSaver`, for when your function doesn't just return a scalar or a vector.
+
+In addition to the learners, ``adaptive`` also provides primitives for
+running the sampling across several cores and even several machines,
+with built-in support for
+`concurrent.futures <https://docs.python.org/3/library/concurrent.futures.html>`_,
+`ipyparallel <https://ipyparallel.readthedocs.io/en/latest/>`_ and
+`distributed <https://distributed.readthedocs.io/en/latest/>`_.
+
+Examples
+--------
+
+Here are some examples of how Adaptive samples vs. homogeneous sampling. Click
+on the *Play* :fa:`play` button or move the sliders.
+
+.. jupyter-execute::
+    :hide-code:
+
+    import itertools
+    import adaptive
+    from adaptive.learner.learner1D import uniform_loss, default_loss
+    import holoviews as hv
+    import numpy as np
+    adaptive.notebook_extension()
+    %output holomap='scrubber'
+
+`adaptive.Learner1D`
+~~~~~~~~~~~~~~~~~~~~
+
+.. jupyter-execute::
+    :hide-code:
+
+    %%opts Layout [toolbar=None]
+    def f(x, offset=0.07357338543088588):
+        a = 0.01
+        return x + a**2 / (a**2 + (x - offset)**2)
+
+    def plot_loss_interval(learner):
+        if learner.npoints >= 2:
+            x_0, x_1 = max(learner.losses, key=learner.losses.get)
+            y_0, y_1 = learner.data[x_0], learner.data[x_1]
+            x, y = [x_0, x_1], [y_0, y_1]
+        else:
+            x, y = [], []
+        return hv.Scatter((x, y)).opts(style=dict(size=6, color='r'))
+
+    def plot(learner, npoints):
+        adaptive.runner.simple(learner, lambda l: l.npoints == npoints)
+        return (learner.plot() * plot_loss_interval(learner))[:, -1.1:1.1]
+
+    def get_hm(loss_per_interval, N=101):
+        learner = adaptive.Learner1D(f, bounds=(-1, 1),
+                                     loss_per_interval=loss_per_interval)
+        plots = {n: plot(learner, n) for n in range(N)}
+        return hv.HoloMap(plots, kdims=['npoints'])
+
+    (get_hm(uniform_loss).relabel('homogeneous sampling')
+     + get_hm(default_loss).relabel('with adaptive'))
+
+`adaptive.Learner2D`
+~~~~~~~~~~~~~~~~~~~~
+
+.. jupyter-execute::
+    :hide-code:
+
+    def ring(xy):
+        import numpy as np
+        x, y = xy
+        a = 0.2
+        return x + np.exp(-(x**2 + y**2 - 0.75**2)**2/a**4)
+
+    def plot(learner, npoints):
+        adaptive.runner.simple(learner, lambda l: l.npoints == npoints)
+        learner2 = adaptive.Learner2D(ring, bounds=learner.bounds)
+        xs = ys = np.linspace(*learner.bounds[0], learner.npoints**0.5)
+        xys = list(itertools.product(xs, ys))
+        learner2.tell_many(xys, map(ring, xys))
+        return (learner2.plot().relabel('homogeneous grid')
+                + learner.plot().relabel('with adaptive')
+                + learner2.plot(tri_alpha=0.5).relabel('homogeneous sampling')
+                + learner.plot(tri_alpha=0.5).relabel('with adaptive')).cols(2)
+
+    learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
+    plots = {n: plot(learner, n) for n in range(4, 1010, 20)}
+    hv.HoloMap(plots, kdims=['npoints']).collate()
+
+`adaptive.AverageLearner`
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. jupyter-execute::
+    :hide-code:
+
+    def g(n):
+        import random
+        random.seed(n)
+        val = random.gauss(0.5, 0.5)
+        return val
+
+    learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)
+
+    def plot(learner, npoints):
+        adaptive.runner.simple(learner, lambda l: l.npoints == npoints)
+        return learner.plot().relabel(f'loss={learner.loss():.2f}')
+
+    plots = {n: plot(learner, n) for n in range(10, 10000, 200)}
+    hv.HoloMap(plots, kdims=['npoints'])
+
+`adaptive.LearnerND`
+~~~~~~~~~~~~~~~~~~~~
+
+.. jupyter-execute::
+    :hide-code:
+
+    def sphere(xyz):
+        import numpy as np
+        x, y, z = xyz
+        a = 0.4
+        return np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2/a**4)
+
+    learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])
+    adaptive.runner.simple(learner, lambda l: l.npoints == 3000)
+
+    learner.plot_3D()
+
+See more in the :ref:`Tutorial Adaptive`.
+
+.. include:: ../../README.rst
+    :start-after: not-in-documentation-end
+    :end-before: credits-end
+
+.. mdinclude:: ../../AUTHORS.md
+
+.. include:: ../../README.rst
+    :start-after: credits-end
+    :end-before: references-start
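
To make the workflow documented above concrete, here is a minimal, self-contained sketch built from the calls the page itself uses (``adaptive.Learner1D``, ``adaptive.runner.simple``, ``learner.loss``); the goal of 101 points is illustrative:

    import adaptive

    def f(x, offset=0.07357338543088588):
        # Sharp peak near `offset`; adaptive sampling concentrates
        # points there instead of spacing them uniformly.
        a = 0.01
        return x + a**2 / (a**2 + (x - offset)**2)

    learner = adaptive.Learner1D(f, bounds=(-1, 1))

    # Run in-process, single-threaded, until 101 points are evaluated.
    adaptive.runner.simple(learner, lambda l: l.npoints >= 101)
    print(learner.loss())  # smaller loss = function better resolved

For the multi-core execution the text mentions via ``concurrent.futures``, a hedged sketch, assuming the ``adaptive.BlockingRunner`` API with ``goal`` and ``executor`` keywords:

    from concurrent.futures import ProcessPoolExecutor

    import adaptive

    # Blocks until the goal is reached, farming evaluations of `f`
    # out to four worker processes.
    adaptive.BlockingRunner(
        adaptive.Learner1D(f, bounds=(-1, 1)),
        goal=lambda l: l.npoints >= 1000,
        executor=ProcessPoolExecutor(max_workers=4),
    )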