1D: increase _dx_eps
See merge request qt/adaptive!73
...
@@ -109,7 +109,7 @@ class Learner1D(BaseLearner):
         self._oldscale = deepcopy(self._scale)

         # The precision in 'x' below which we set losses to 0.
-        self._dx_eps = max(np.abs(bounds)) * np.finfo(float).eps
+        self._dx_eps = 2 * max(np.abs(bounds)) * np.finfo(float).eps

         self.bounds = list(bounds)

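Why the factor of 2 matters here: np.finfo(float).eps is the gap
between 1.0 and the next representable double, so near
x = max(np.abs(bounds)) adjacent floats differ by roughly
max(np.abs(bounds)) * eps. The old threshold therefore sat exactly at
the representable resolution, and an interval spanning a single float
gap could still be scheduled for subdivision. A minimal sketch of the
new threshold, assuming bounds of (-1, 1) (illustrative, not taken
from the diff):

    import numpy as np

    bounds = (-1, 1)
    dx_eps = 2 * max(np.abs(bounds)) * np.finfo(float).eps
    gap = np.nextafter(1.0, 2.0) - 1.0  # float spacing just above 1.0
    print(gap)     # ~2.22e-16 (== eps at |x| = 1)
    print(dx_eps)  # ~4.44e-16, twice the smallest representable width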
...
@@ -125,42 +125,42 @@ class Learner1D(BaseLearner):

     def loss(self, real=True):
         losses = self.losses if real else self.losses_combined
-        if len(losses) == 0:
-            return float('inf')
-        else:
-            return max(losses.values())
+        return max(losses.values()) if len(losses) > 0 else float('inf')

-    def update_interpolated_losses_in_interval(self, x_lower, x_upper):
-        if x_lower is not None and x_upper is not None:
-            dx = x_upper - x_lower
-            loss = self.loss_per_interval((x_lower, x_upper), self._scale, self.data)
-            self.losses[x_lower, x_upper] = loss if abs(dx) > self._dx_eps else 0
+    def update_interpolated_loss_in_interval(self, x_left, x_right):
+        if x_left is not None and x_right is not None:
+            dx = x_right - x_left
+            if dx < self._dx_eps:
+                loss = 0
+            else:
+                loss = self.loss_per_interval((x_left, x_right),
+                                              self._scale, self.data)
+            self.losses[x_left, x_right] = loss

-            start = self.neighbors_combined.bisect_right(x_lower)
-            end = self.neighbors_combined.bisect_left(x_upper)
+            start = self.neighbors_combined.bisect_right(x_left)
+            end = self.neighbors_combined.bisect_left(x_right)
             for i in range(start, end):
-                a, b = self.neighbors_combined.iloc[i], self.neighbors_combined.iloc[i + 1]
-                self.losses_combined[a, b] = (b - a) * self.losses[x_lower, x_upper] / dx
+                a, b = (self.neighbors_combined.iloc[i],
+                        self.neighbors_combined.iloc[i + 1])
+                self.losses_combined[a, b] = (b - a) * loss / dx
             if start == end:
-                self.losses_combined[x_lower, x_upper] = self.losses[x_lower, x_upper]
+                self.losses_combined[x_left, x_right] = loss

     def update_losses(self, x, real=True):
         if real:
-            x_lower, x_upper = self.get_neighbors(x, self.neighbors)
-            self.update_interpolated_losses_in_interval(x_lower, x)
-            self.update_interpolated_losses_in_interval(x, x_upper)
-            self.losses.pop((x_lower, x_upper), None)
+            x_left, x_right = self.find_neighbors(x, self.neighbors)
+            self.update_interpolated_loss_in_interval(x_left, x)
+            self.update_interpolated_loss_in_interval(x, x_right)
+            self.losses.pop((x_left, x_right), None)
         else:
             losses_combined = self.losses_combined
-            x_lower, x_upper = self.get_neighbors(x, self.neighbors)
-            a, b = self.get_neighbors(x, self.neighbors_combined)
-            if x_lower is not None and x_upper is not None:
-                dx = x_upper - x_lower
-                loss = self.losses[x_lower, x_upper]
-                losses_combined[a, x] = ((x - a) * loss / dx
-                                         if abs(x - a) > self._dx_eps else 0)
-                losses_combined[x, b] = ((b - x) * loss / dx
-                                         if abs(b - x) > self._dx_eps else 0)
+            x_left, x_right = self.find_neighbors(x, self.neighbors)
+            a, b = self.find_neighbors(x, self.neighbors_combined)
+            if x_left is not None and x_right is not None:
+                dx = x_right - x_left
+                loss = self.losses[x_left, x_right]
+                losses_combined[a, x] = (x - a) * loss / dx
+                losses_combined[x, b] = (b - x) * loss / dx
             else:
                 if a is not None:
                     losses_combined[a, x] = float('inf')
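The loop above spreads a real interval's loss over the sub-intervals
that pending (not yet evaluated) points create inside it, weighted by
width, so that losses_combined stays consistent with losses. A
hypothetical standalone illustration of that redistribution (interval,
loss value, and pending point are made-up numbers):

    x_left, x_right, loss = 0.0, 1.0, 0.6  # real interval and its loss
    dx = x_right - x_left
    subintervals = [(0.0, 0.25), (0.25, 1.0)]  # a pending point at 0.25
    losses_combined = {(a, b): (b - a) * loss / dx
                       for a, b in subintervals}
    print(losses_combined)  # {(0.0, 0.25): 0.15, (0.25, 1.0): 0.45}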
...
@@ -169,23 +169,20 @@ class Learner1D(BaseLearner):

             losses_combined.pop((a, b), None)

-    def get_neighbors(self, x, neighbors):
+    def find_neighbors(self, x, neighbors):
         if x in neighbors:
             return neighbors[x]
-        return self.find_neighbors(x, neighbors)
-
-    def find_neighbors(self, x, neighbors):
         pos = neighbors.bisect_left(x)
-        x_lower = neighbors.iloc[pos-1] if pos != 0 else None
-        x_upper = neighbors.iloc[pos] if pos != len(neighbors) else None
-        return x_lower, x_upper
+        x_left = neighbors.iloc[pos - 1] if pos != 0 else None
+        x_right = neighbors.iloc[pos] if pos != len(neighbors) else None
+        return x_left, x_right

     def update_neighbors(self, x, neighbors):
         if x not in neighbors:  # The point is new
-            x_lower, x_upper = self.find_neighbors(x, neighbors)
-            neighbors[x] = [x_lower, x_upper]
-            neighbors.get(x_lower, [None, None])[1] = x
-            neighbors.get(x_upper, [None, None])[0] = x
+            x_left, x_right = self.find_neighbors(x, neighbors)
+            neighbors[x] = [x_left, x_right]
+            neighbors.get(x_left, [None, None])[1] = x
+            neighbors.get(x_right, [None, None])[0] = x

     def update_scale(self, x, y):
         """Update the scale with which the x and y-values are scaled.
...
@@ -247,11 +244,10 @@ class Learner1D(BaseLearner):
         if self._scale[1] > self._oldscale[1] * 2:

             for interval in self.losses:
-                self.update_interpolated_losses_in_interval(*interval)
+                self.update_interpolated_loss_in_interval(*interval)

         self._oldscale = deepcopy(self._scale)

-
     def ask(self, n, add_data=True):
         """Return n points that are expected to maximally reduce the loss."""
         # Find out how to divide the n points over the intervals
...
@@ -265,43 +261,44 @@ class Learner1D(BaseLearner):
             return [], []

         # If the bounds have not been chosen yet, we choose them first.
-        points = []
-        for bound in self.bounds:
-            if bound not in self.data and bound not in self.pending_points:
-                points.append(bound)
+        points = [b for b in self.bounds if b not in self.data
+                  and b not in self.pending_points]

         if len(points) == 2:
             # First time
             loss_improvements = [np.inf] * n
-            points = np.linspace(*self.bounds, n)
+            points = np.linspace(*self.bounds, n).tolist()
         elif len(points) == 1:
             # Second time, if we previously returned just self.bounds[0]
             loss_improvements = [np.inf] * n
-            points = np.linspace(*self.bounds, n + 1)[1:]
+            points = np.linspace(*self.bounds, n + 1)[1:].tolist()
         else:
-            def xs(x, n):
+            def xs(x_left, x_right, n):
                 if n == 1:
+                    # This is just an optimization
                     return []
                 else:
-                    step = (x[1] - x[0]) / n
-                    return [x[0] + step * i for i in range(1, n)]
+                    step = (x_right - x_left) / n
+                    return [x_left + step * i for i in range(1, n)]

             # Calculate how many points belong to each interval.
             x_scale = self._scale[0]
-            quals = [(-loss if not math.isinf(loss) else (x0 - x1) / x_scale, (x0, x1), 1)
-                     for ((x0, x1), loss) in self.losses_combined.items()]
-
+            quals = [(-loss if not math.isinf(loss) else -(x[1] - x[0]) / x_scale, x, 1)
+                     for x, loss in self.losses_combined.items()]
             heapq.heapify(quals)

             for point_number in range(n):
                 quality, x, n = quals[0]
+                if abs(x[1] - x[0]) / (n + 1) <= self._dx_eps:
+                    # The interval is too small and should not be subdivided
+                    quality = np.inf
                 heapq.heapreplace(quals, (quality * n / (n + 1), x, n + 1))

-            points = list(itertools.chain.from_iterable(xs(x, n)
-                          for quality, x, n in quals))
+            points = list(itertools.chain.from_iterable(
+                          xs(*x, n) for quality, x, n in quals))

             loss_improvements = list(itertools.chain.from_iterable(
-                                     itertools.repeat(-quality, n-1)
+                                     itertools.repeat(-quality, n - 1)
                                      for quality, x, n in quals))

         if add_data:
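How the heap divides the n requested points: each entry is
(quality, interval, subdivisions) with quality ~ -loss / subdivisions,
so the interval with the largest remaining loss sits at the top of the
min-heap, and its quality is diluted by n / (n + 1) after every draw.
The new _dx_eps branch exploits the same ordering: giving a
machine-precision interval a quality of np.inf sinks it to the bottom
of the heap, so it never receives further points. A stripped-down
sketch of the allocation loop with made-up losses:

    import heapq

    # (negated loss, interval, number of subdivisions)
    losses_combined = {(0.0, 0.5): 0.2, (0.5, 1.0): 0.8}
    quals = [(-loss, x, 1) for x, loss in losses_combined.items()]
    heapq.heapify(quals)

    for _ in range(3):  # distribute 3 new points
        quality, x, m = quals[0]
        heapq.heapreplace(quals, (quality * m / (m + 1), x, m + 1))

    for quality, x, m in quals:
        print(x, m - 1)  # (0.0, 0.5) gets 0 points, (0.5, 1.0) gets 3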
...
@@ -326,7 +323,6 @@ class Learner1D(BaseLearner):

         return p.redim(x=dict(range=plot_bounds))

-
     def remove_unfinished(self):
         self.pending_points = set()
         self.losses_combined = deepcopy(self.losses)
...
@@ -12,7 +12,7 @@ import scipy.spatial
 import pytest

 from ..learner import *
-from ..runner import replay_log
+from ..runner import simple, replay_log


 def generate_random_parametrization(f):
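The newly imported simple is adaptive's serial runner: it keeps asking
the learner for one point at a time, evaluates it, and feeds the result
back until the goal is reached. Roughly (a sketch of the behavior, not
the exact source):

    def simple(learner, goal):
        while not goal(learner):
            xs, _ = learner.ask(1)
            for x in xs:
                learner.tell(x, learner.function(x))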
...
@@ -405,6 +405,63 @@ def test_termination_on_discontinuities():
     assert smallest_interval >= 0.5E3 * np.finfo(float).eps


+def test_loss_at_machine_precision_interval_is_zero():
+    """The loss of an interval smaller than _dx_eps
+    should be set to zero."""
+    def f(x):
+        return 1 if x == 0 else 0
+
+    def goal(l):
+        return l.loss() < 0.01 or l.npoints >= 1000
+
+    learner = Learner1D(f, bounds=(-1, 1))
+    simple(learner, goal=goal)
+
+    # this means loss < 0.01 was reached
+    assert learner.npoints != 1000
+
+
+def small_deviations(x):
+    import random
+    return 0 if x <= 1 else 1 + 10**(-random.randint(12, 14))
+
+
+def test_small_deviations():
+    """This tests whether the Learner1D can handle small deviations.
+    See https://gitlab.kwant-project.org/qt/adaptive/merge_requests/73 and
+    https://gitlab.kwant-project.org/qt/adaptive/issues/61."""
+
+    eps = 5e-14
+    learner = Learner1D(small_deviations, bounds=(1 - eps, 1 + eps))
+
+    # Some non-determinism is needed to make this test fail so we keep
+    # a list of points that will be evaluated later to emulate
+    # parallel execution
+    stash = []
+
+    for i in range(100):
+        xs, _ = learner.ask(10)
+
+        # Save 5 random points out of `xs` for later
+        random.shuffle(xs)
+        for _ in range(5):
+            stash.append(xs.pop())
+
+        for x in xs:
+            learner.tell(x, learner.function(x))
+
+ # Evaluate and add 5 random points from `stash` |
|
454 |
+ random.shuffle(stash) |
|
455 |
+ for _ in range(5): |
|
456 |
+ learner.tell(stash.pop(), learner.function(x)) |
|
457 |
+ |
|
458 |
+ if learner.loss() == 0: |
|
459 |
+ # If this condition is met, the learner can't return any |
|
460 |
+ # more points. |
|
461 |
+ break |
|
462 |
+ |
|
463 |
+ |
|
408 | 464 |
@pytest.mark.xfail |
409 | 465 |
@run_with(Learner1D, Learner2D, LearnerND) |
410 | 466 |
def test_convergence_for_arbitrary_ordering(learner_type, f, learner_kwargs): |