@@ -684,7 +684,7 @@ def finite_loss(ival, loss, x_scale):
         sort intervals that have infinite loss."""
     # If the loss is infinite we return the
     # distance between the two points.
-    if math.isinf(loss):
+    if math.isinf(loss) or math.isnan(loss):
         loss = (ival[1] - ival[0]) / x_scale
     if len(ival) == 3:
         # Used when constructing quals. Last item is
@@ -363,3 +363,15 @@ def test_curvature_loss_vectors():
     learner = Learner1D(f, (-1, 1), loss_per_interval=loss)
     simple(learner, goal=lambda l: l.npoints > 100)
     assert learner.npoints > 100
+
+
+def test_NaN_loss():
+    # see https://github.com/python-adaptive/adaptive/issues/145
+    def f(x):
+        a = 0.01
+        if random.random() < 0.2:
+            return np.NaN
+        return x + a**2 / (a**2 + x**2)
+
+    learner = Learner1D(f, bounds=(-1, 1))
+    simple(learner, lambda l: l.npoints > 100)