@@ -12,7 +12,7 @@ import scipy.spatial
 import pytest

 from ..learner import *
-from ..runner import replay_log, BlockingRunner, SequentialExecutor
+from ..runner import simple, replay_log


 def generate_random_parametrization(f):
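The import change above drops the BlockingRunner/SequentialExecutor pair in favour of the synchronous `simple` runner. Below is a minimal sketch of how the swapped-in runner is driven, assuming the installed `adaptive` package exposes `Learner1D` and `runner.simple` the same way the relative imports in this test module do; the goal mirrors the rewritten test further down, stopping either when the loss is small enough or after a bounded number of points so the loop cannot run forever.

import adaptive
from adaptive.runner import simple

def f(x):
    # same discontinuous test function as in the hunk below
    return 1 if x == 0 else 0

learner = adaptive.Learner1D(f, bounds=(-1, 1))

# `simple` runs the ask/tell loop in the current process, so no executor
# is needed; it returns once goal(learner) is True
simple(learner, goal=lambda l: l.loss() < 0.01 or l.npoints >= 1000)

# the rewritten test then checks `learner.npoints != 1000`, i.e. that the
# loss goal was reached before the point budget ran out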
@@ -407,29 +407,53 @@ def test_termination_on_discontinuities():


 def test_loss_at_machine_precision_interval_is_zero():
     """The loss of an interval smaller than _dx_eps
-    should be set to zero. If this is not the case, this test
-    will go on forever."""
+    should be set to zero."""
     def f(x):
         return 1 if x == 0 else 0

+    def goal(l):
+        return l.loss() < 0.01 or l.npoints >= 1000
+
     learner = Learner1D(f, bounds=(-1, 1))
-    ex = SequentialExecutor()
-    runner = BlockingRunner(learner, executor=ex,
-                            goal=lambda l: l.loss() < 0.001)
+    simple(learner, goal=goal)
+
+    # this means loss < 0.01 was reached
+    assert learner.npoints != 1000


 def small_deviations(x):
     import random
-    return 0 if x < 1 else 1 + 10**(-random.randint(12, 14))
+    return 0 if x <= 1 else 1 + 10**(-random.randint(12, 14))


 def test_small_deviations():
     """This tests whether the Learner1D can handle small deviations.
     See https://gitlab.kwant-project.org/qt/adaptive/merge_requests/73 and
     https://gitlab.kwant-project.org/qt/adaptive/issues/61."""
-    learner = Learner1D(small_deviations, bounds=(0, 2))
-    runner = BlockingRunner(learner,
-                            goal=lambda l: l.npoints > 5000)
+
+    eps = 5e-14
+    learner = Learner1D(small_deviations, bounds=(1 - eps, 1 + eps))
+
+    # Some non-determinism is needed to make this test fail so we keep
+    # a list of points that will be evaluated later to emulate
+    # parallel execution
+    stash = []
+
+    for i in range(100):
+        xs, _ = learner.ask(10)
+
+        # Save 5 random points out of `xs` for later
+        random.shuffle(xs)
+        for _ in range(5):
+            stash.append(xs.pop())
+
+        for x in xs:
+            learner.tell(x, learner.function(x))
+
+        # Evaluate and add 5 random points from `stash`
+        random.shuffle(stash)
+        for _ in range(5):
+            learner.tell(stash.pop(), learner.function(x))


 @pytest.mark.xfail
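The rewritten test_small_deviations drives the learner by hand instead of through a runner: each round it asks for a batch of points, holds a few back in `stash`, and tells them to the learner only in a later round, emulating the out-of-order results a parallel executor would deliver. A condensed sketch of that ask/tell pattern, assuming the same `adaptive` Learner1D interface used above (the function and loop sizes are illustrative only, and each stashed point is evaluated at its own coordinate):

import random

import adaptive

learner = adaptive.Learner1D(lambda x: x**2, bounds=(-1, 1))
stash = []

for _ in range(20):
    xs, _ = learner.ask(4)            # request a batch of points
    random.shuffle(xs)
    stash.append(xs.pop())            # hold one point back for later

    for x in xs:                      # report the rest immediately
        learner.tell(x, learner.function(x))

    # report one previously stashed point late, i.e. out of order
    x_late = stash.pop(random.randrange(len(stash)))
    learner.tell(x_late, learner.function(x_late))

As the diff's own comment notes, this out-of-order reporting supplies the non-determinism needed for the test to be able to fail at all.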