Browse code

Merge branch 'tests' into 'master'

setup CI and tests

See merge request qt/adaptive!3

Bas Nijholt authored on 20/11/2017 15:53:37
Showing 7 changed files
1 1
new file mode 100644
... ...
@@ -0,0 +1,4 @@
1
# CI configuration: run the test suite inside the research Docker image.
image: quantumtinkerer/research

test:
  script:
    # Consistent 2-space indentation (the original list item was indented 5 spaces).
    - py.test adaptive
0 5
new file mode 100644
1 6
similarity index 98%
2 7
rename from algorithm_4.py
3 8
rename to adaptive/tests/algorithm_4.py
... ...
@@ -2,6 +2,7 @@
2 2
 # Copyright 2017 Christoph Groth
3 3
 
4 4
 import warnings
5
+import functools
5 6
 from fractions import Fraction as Frac
6 7
 from collections import defaultdict
7 8
 import numpy as np
... ...
@@ -412,10 +413,23 @@ def algorithm_4 (f, a, b, tol):
412 413
 
413 414
 ################ Tests ################
414 415
 
416
def silence_warnings(f):
    """Decorator that suppresses every warning raised while *f* executes.

    The wrapped function behaves exactly like *f*, but runs inside a
    'warnings.catch_warnings' context with all warnings ignored.
    """

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return f(*args, **kwargs)

    return wrapper


@silence_warnings
def f0(x):
    # Oscillates rapidly near x == 0 and has a kink at x == 1.
    return x * np.sin(1/x) * np.sqrt(abs(1 - x))
417 430
 
418 431
 
432
@silence_warnings
def f7(x):
    # Integrable singularity at x == 0 (exponent -0.5 > -1);
    # the power evaluation warns near 0, hence the decorator.
    return x**-0.5
421 435
 
... ...
@@ -431,10 +445,12 @@ def f21(x):
431 445
     return y
432 446
 
433 447
 
448
@silence_warnings
def f63(x):
    # Integrable singularity at x = 0.987654321 (exponent -0.45 > -1).
    return abs(x - 0.987654321)**-0.45
436 451
 
437 452
 
453
@silence_warnings
def fdiv(x):
    # Divergent integral: exponent -1.1 < -1, so integrating over the
    # singularity at x = 0.987654321 must raise DivergentIntegralError.
    return abs(x - 0.987654321)**-1.1
440 456
 
441 457
new file mode 100644
... ...
@@ -0,0 +1,71 @@
1
+# -*- coding: utf-8 -*-
2
+
3
+import numpy as np
4
+import pytest
5
+from ..learner import IntegratorLearner
6
+from ..learner.integrator_learner import DivergentIntegralError
7
+from .algorithm_4 import algorithm_4, f0, f7, f21, f24, f63, fdiv
8
+from .algorithm_4 import DivergentIntegralError as A4DivergentIntegralError
9
+
10
+
11
def run_integrator_learner(f, a, b, tol, nr_points):
    """Drive an IntegratorLearner for exactly 'nr_points' evaluations.

    One point is requested and fed back per iteration, mirroring the
    sequential behaviour of the reference implementation.
    """
    learner = IntegratorLearner(f, bounds=(a, b), tol=tol)
    for _ in range(nr_points):
        xs, _ = learner.choose_points(1)
        learner.add_data(xs, map(learner.function, xs))
    return learner
17
+
18
+
19
def same_ivals(f, a, b, tol):
    """Return True iff the learner reproduces algorithm_4's intervals."""
    igral, err, nr_points, ivals = algorithm_4(f, a, b, tol)

    learner = run_integrator_learner(f, a, b, tol, nr_points)

    # This will only show up if the test fails, anyway
    print('igral difference', learner.igral - igral,
          'err difference', learner.err - err)

    return learner.equal(ivals, verbose=True)
29
+
30
+
31
def test_cquad():
    """The learner must produce the same intervals as the reference code."""
    cases = [(f0, 0, 3, 1e-5),
             (f7, 0, 1, 1e-6),
             (f21, 0, 1, 1e-3),
             (f24, 0, 3, 1e-3)]
    for i, args in enumerate(cases):
        assert same_ivals(*args), 'Function {}'.format(i)
37
+
38
+
39
@pytest.mark.xfail
def test_machine_precision():
    """At tol near machine precision the interval sets are expected to drift."""
    f, a, b, tol = f63, 0, 1, 1e-10
    igral, err, nr_points, ivals = algorithm_4(f, a, b, tol)

    learner = run_integrator_learner(f, a, b, tol, nr_points)

    # Only visible when the assertion below fails.
    print('igral difference', learner.igral - igral,
          'err difference', learner.err - err)

    assert learner.equal(ivals, verbose=True)
50
+
51
+
52
def test_machine_precision2():
    """Even at machine precision the integral and error estimates agree."""
    f, a, b, tol = f63, 0, 1, 1e-10
    igral, err, nr_points, ivals = algorithm_4(f, a, b, tol)

    learner = run_integrator_learner(f, a, b, tol, nr_points)

    np.testing.assert_almost_equal(igral, learner.igral)
    np.testing.assert_almost_equal(err, learner.err)
60
+
61
+
62
def test_divergence():
    """Both implementations must raise a DivergentIntegralError for fdiv."""
    f, a, b, tol = fdiv, 0, 1, 1e-6
    with pytest.raises(A4DivergentIntegralError) as e:
        # The assignment targets in the original were dead code: the call
        # raises, so nothing is ever bound. Call for the side effect only.
        algorithm_4(f, a, b, tol)

    # Re-run the learner for the same number of points at which the
    # reference implementation detected divergence.
    nr_points = e.value.nr_points

    with pytest.raises(DivergentIntegralError):
        run_integrator_learner(f, a, b, tol, nr_points)
0 72
new file mode 100644
... ...
@@ -0,0 +1,304 @@
1
+# -*- coding: utf-8 -*-
2
+
3
+import collections
4
+import inspect
5
+import itertools as it
6
+import functools as ft
7
+import random
8
+import math
9
+import numpy as np
10
+import scipy.spatial
11
+
12
+import pytest
13
+
14
+from ..learner import *
15
+
16
+
17
def generate_random_parametrization(f):
    """Return a realization of 'f' with parameters bound to random values.

    Parameters
    ----------
    f : callable
        All parameters but the first must be annotated with a callable
        that, when called with no arguments, produces a value of the
        appropriate type for the parameter in question.
    """
    # Skip the first parameter (the learner's input variable).
    _, *params = inspect.signature(f).parameters.items()
    if not all(callable(v.annotation) for _, v in params):
        raise TypeError('All parameters to {} must be annotated with functions.'
                        .format(f.__name__))
    realization = {name: v.annotation() for name, v in params}
    return ft.partial(f, **realization)
33
+
34
+
35
def uniform(a, b):
    """Return a nullary callable that samples uniformly from [a, b]."""
    return ft.partial(random.uniform, a, b)
37
+
38
+
39
+# Library of functions and associated learners.
40
+
41
# Registry mapping a learner class to the (function, init_kwargs) pairs
# that should be exercised with it.
learner_function_combos = collections.defaultdict(list)


def learn_with(learner_type, **init_kwargs):
    """Decorator: register the function for testing with 'learner_type'."""

    def decorator(f):
        learner_function_combos[learner_type].append((f, init_kwargs))
        return f

    return decorator
50
+
51
+
52
+# All parameters except the first must be annotated with a callable that
53
+# returns a random value for that parameter.
54
+
55
+
56
@learn_with(Learner1D, bounds=(-1, 1))
def linear(x, m: uniform(0, 10)):
    # Straight line through the origin with a random positive slope.
    return m * x
59
+
60
+
61
@learn_with(Learner1D, bounds=(-1, 1))
def linear_with_peak(x, d: uniform(-1, 1)):
    # Linear background plus a narrow Lorentzian peak of width 'a'
    # at a random position 'd'.
    a = 0.01
    return x + a**2 / (a**2 + (x - d)**2)
65
+
66
+
67
@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)))
def ring_of_fire(xy, d: uniform(0.2, 1)):
    # Sharp circular ridge along x**2 + y**2 == d**2, width set by 'a'.
    a = 0.2
    x, y = xy
    return x + math.exp(-(x**2 + y**2 - d**2)**2 / a**4)
72
+
73
+
74
@learn_with(AverageLearner, rtol=1)
def gaussian(n):
    # 'n' is ignored; every sample is independent standard-normal noise.
    return random.gauss(0, 1)
77
+
78
+
79
+# Decorators for tests.
80
+
81
+
82
def run_with(*learner_types):
    """Parametrize a test over every (function, kwargs) combo registered
    for each of the given learner types."""
    combos = [(l, f, dict(k))
              for l in learner_types
              for f, k in learner_function_combos[l]]
    return pytest.mark.parametrize('learner_type, f, learner_kwargs', combos)
89
+
90
+
91
def choose_points_randomly(learner, rounds, points):
    """Query 'learner' over a random number of rounds.

    'rounds' and 'points' are (low, high) pairs fed to randrange: the
    number of rounds and the number of points requested per round.
    Returns the concatenated points and loss improvements.
    """
    n_rounds = random.randrange(*rounds)
    n_points = [random.randrange(*points) for _ in range(n_rounds)]

    all_xs, all_ls = [], []
    for n in n_points:
        x, l = learner.choose_points(n)
        all_xs += x
        all_ls += l

    return all_xs, all_ls
103
+
104
+
105
@run_with(Learner1D)
def test_uniform_sampling1D(learner_type, f, learner_kwargs):
    """Points are sampled uniformly if no data is provided.

    Non-uniform sampling implies that we think we know something about
    the function, which we do not in the absence of data.
    """
    f = generate_random_parametrization(f)
    learner = learner_type(f, **learner_kwargs)

    points, _ = choose_points_randomly(learner, (10, 20), (10, 20))

    # Sort once: the original sorted 'points' in place and then called
    # sorted() on it again, which was redundant.
    ivals = np.diff(sorted(points))
    # Uniform sampling means no interval is more than ~2x any other.
    assert max(ivals) / min(ivals) < 2 + 1e-8
120
+
121
+
122
@pytest.mark.xfail
@run_with(Learner2D)
def test_uniform_sampling2D(learner_type, f, learner_kwargs):
    """Points are sampled uniformly if no data is provided.

    Non-uniform sampling implies that we think we know something about
    the function, which we do not in the absence of data.
    """
    f = generate_random_parametrization(f)
    learner = learner_type(f, **learner_kwargs)

    points, _ = choose_points_randomly(learner, (70, 100), (10, 20))
    tree = scipy.spatial.cKDTree(points)

    # Build a regular reference grid with the same aspect ratio as the bounds.
    n = math.sqrt(len(points))
    xbounds, ybounds = learner_kwargs['bounds']
    r = math.sqrt((ybounds[1] - ybounds[0]) / (xbounds[1] - xbounds[0]))
    xs, dx = np.linspace(*xbounds, int(n / r), retstep=True)
    ys, dy = np.linspace(*ybounds, int(n * r), retstep=True)

    # Every grid node must have a sampled point within one grid diagonal.
    distances, _ = tree.query(list(it.product(xs, ys)), k=1)
    assert max(distances) < math.sqrt(dx**2 + dy**2)
145
+
146
+
147
@run_with(Learner1D, Learner2D)
def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
    """Adding already existing data is an idempotent operation.

    Either it is idempotent, or it is an error.
    This is the only sane behaviour.
    """
    f = generate_random_parametrization(f)
    learner = learner_type(f, **learner_kwargs)
    control = learner_type(f, **learner_kwargs)

    N = random.randint(10, 30)
    control.choose_points(N)  # advance the control to the same state
    xs, _ = learner.choose_points(N)
    points = [(x, f(x)) for x in xs]

    for p in points:
        control.add_point(*p)
        learner.add_point(*p)

    # Feed the learner the very same data a second time, shuffled.
    random.shuffle(points)
    for p in points:
        learner.add_point(*p)

    M = random.randint(10, 30)
    pls = zip(*learner.choose_points(M))
    cpls = zip(*control.choose_points(M))
    # Point ordering is not defined, so compare as sets
    assert set(pls) == set(cpls)
176
+
177
+
178
@run_with(Learner1D, Learner2D, AverageLearner)
def test_adding_non_chosen_data(learner_type, f, learner_kwargs):
    """Adding data for a point that was not returned by 'choose_points'."""
    f = generate_random_parametrization(f)
    learner = learner_type(f, **learner_kwargs)
    control = learner_type(f, **learner_kwargs)

    # Only the control asked for these points; the learner receives
    # them without ever having chosen them.
    N = random.randint(10, 30)
    xs, _ = control.choose_points(N)

    for x in xs:
        control.add_point(x, f(x))
        learner.add_point(x, f(x))

    M = random.randint(10, 30)
    pls = zip(*learner.choose_points(M))
    cpls = zip(*control.choose_points(M))
    # Point ordering within a single call to 'choose_points'
    # is not guaranteed to be the same by the API.
    assert set(pls) == set(cpls)
199
+
200
+
201
@run_with(Learner1D, Learner2D, AverageLearner)
def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
    """The order of calls to 'add_points' between calls to
       'choose_points' is arbitrary."""
    f = generate_random_parametrization(f)
    learner = learner_type(f, **learner_kwargs)
    control = learner_type(f, **learner_kwargs)

    N = random.randint(10, 30)
    control.choose_points(N)
    xs, _ = learner.choose_points(N)
    points = [(x, f(x)) for x in xs]

    # The control receives the points in the original order ...
    for p in points:
        control.add_point(*p)

    # ... the learner in a shuffled order.
    random.shuffle(points)
    for p in points:
        learner.add_point(*p)

    M = random.randint(10, 30)
    pls = zip(*learner.choose_points(M))
    cpls = zip(*control.choose_points(M))
    # Point ordering within a single call to 'choose_points'
    # is not guaranteed to be the same by the API.
    assert set(pls) == set(cpls)
227
+
228
+
229
@run_with(Learner1D, Learner2D, AverageLearner)
def test_expected_loss_improvement_is_less_than_total_loss(learner_type, f, learner_kwargs):
    """The estimated loss improvement can never be greater than the total loss."""
    f = generate_random_parametrization(f)
    learner = learner_type(f, **learner_kwargs)

    # The first batch of loss improvements was bound but never used in
    # the original; discard it explicitly.
    N = random.randint(50, 100)
    xs, _ = learner.choose_points(N)

    for x in xs:
        learner.add_point(x, f(x))

    M = random.randint(50, 100)
    _, loss_improvements = learner.choose_points(M)

    assert sum(loss_improvements) < learner.loss()
244
+
245
+
246
@pytest.mark.xfail
@run_with(Learner1D, Learner2D)
def test_learner_subdomain(learner_type, f, learner_kwargs):
    """Learners that never receive data outside of a subdomain should
       perform 'similarly' to learners defined on that subdomain only."""
    # XXX: not sure how to implement this. How do we measure "performance"?
    # Left as an xfail placeholder until a performance metric is chosen.
    raise NotImplementedError()
253
+
254
+
255
@run_with(Learner1D, Learner2D)
def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner_kwargs):
    """Learners behave identically under transformations that leave
       the loss invariant.

    This is a statement that the learner makes decisions based solely
    on the loss function.
    """
    # for now we just scale X and Y by random factors
    f = generate_random_parametrization(f)

    control_kwargs = dict(learner_kwargs)
    control = learner_type(f, **control_kwargs)

    xscale = 1000 * random.random()
    yscale = 1000 * random.random()

    # The scaled learner sees x-bounds stretched by 'xscale' and function
    # values stretched by 'yscale'.
    l_kwargs = dict(learner_kwargs)
    l_kwargs['bounds'] = xscale * np.array(l_kwargs['bounds'])
    learner = learner_type(lambda x: yscale * f(x), **l_kwargs)

    nrounds = random.randrange(50, 100)
    npoints = [random.randrange(1, 10) for _ in range(nrounds)]

    # NOTE: the original declared an unused 'control_points = []'; that
    # dead variable has been removed.
    for n in npoints:
        cxs, _ = control.choose_points(n)
        xs, _ = learner.choose_points(n)
        # Point ordering within a single call to 'choose_points'
        # is not guaranteed to be the same by the API.
        # Also, points will only be equal up to a tolerance, due to rounding
        should_be = sorted(cxs)
        to_check = np.array(sorted(xs)) / xscale
        assert np.allclose(should_be, to_check)

        control.add_data(cxs, [control.function(x) for x in cxs])
        learner.add_data(xs, [learner.function(x) for x in xs])
293
+
294
+
295
@pytest.mark.xfail
@run_with(Learner1D, Learner2D)
def test_convergence_for_arbitrary_ordering(learner_type, f, learner_kwargs):
    """Learners that are learning the same function should converge
    to the same result "eventually" if given the same data, regardless
    of the order in which that data is given.
    """
    # XXX: not sure how to implement this. Can we say anything at all about
    #      the scaling of the loss with the number of points?
    # Left as an xfail placeholder until a convergence criterion is chosen.
    raise NotImplementedError()
0 305
new file mode 100644
... ...
@@ -0,0 +1,2 @@
1
+[pytest]
2
+testpaths = adaptive
0 3
deleted file mode 100644
... ...
@@ -1,66 +0,0 @@
1
-import numpy as np
2
-from adaptive.learner import IntegratorLearner
3
-from algorithm_4 import algorithm_4
4
-
5
-def same_ivals(f, a, b, tol, verbose):
6
-        igral, err, nr_points, ivals = algorithm_4(f, a, b, tol)
7
-
8
-        learner = IntegratorLearner(f, bounds=(a, b), tol=tol)
9
-        for i in range(nr_points):
10
-            points, loss_improvement = learner.choose_points(1)
11
-            learner.add_data(points, map(learner.function, points))
12
-        if verbose:
13
-            print('igral diff, ', learner.igral-igral, 'err diff', learner.err - err)
14
-        return learner.equal(ivals, verbose=verbose)
15
-
16
-
17
-def same_ivals_up_to(f, a, b, tol):
18
-        igral, err, nr_points, ivals = algorithm_4(f, a, b, tol)
19
-
20
-        learner = IntegratorLearner(f, bounds=(a, b), tol=tol)
21
-        j = 0
22
-        equal_till = 0
23
-        for i in range(nr_points):
24
-            points, loss_improvement = learner.choose_points(1)
25
-            learner.add_data(points, map(learner.function, points))
26
-            if not learner._stack:
27
-                try:
28
-                    j += 1
29
-                    if learner.equal(ivals):
30
-                        equal_till = i + 1
31
-                except:
32
-                    all_equal = False
33
-
34
-        return 'equal_till nr_points={} of {}'.format(equal_till, nr_points)
35
-
36
-if __name__ == '__main__':
37
-    old_settings = np.seterr(all='ignore')
38
-    from algorithm_4 import f0, f7, f24, f21, f63, fdiv
39
-    for i, args in enumerate([[f0, 0, 3, 1e-5],
40
-                              [f7, 0, 1, 1e-6],
41
-                              [f21, 0, 1, 1e-3],
42
-                              [f24, 0, 3, 1e-3],
43
-                              [f63, 0, 1, 1e-10]]):
44
-        print('\nFunction {}'.format(i))
45
-        if same_ivals(*args, verbose=True):
46
-            print(True)
47
-        else:
48
-            print(same_ivals_up_to(*args))
49
-    
50
-    # This function should raise a DivergentIntegralError.
51
-    print('Function ', i+1)
52
-    f, a, b, tol = [fdiv, 0, 1, 1e-6]
53
-    try:
54
-        igral, err, nr_points, ivals = algorithm_4(f, a, b, tol)
55
-    except Exception:
56
-        print('The integral is diverging.')
57
-
58
-    try:
59
-        learner = IntegratorLearner(f, bounds=(a, b), tol=tol)
60
-        for i in range(nr_points):
61
-            points, loss_improvement = learner.choose_points(1)
62
-            learner.add_data(points, map(learner.function, points))
63
-    except Exception:
64
-        print('The integral is diverging.')
65
-
66
-    np.seterr(**old_settings)