Because data is now in the 'BaseLearner'
Bas Nijholt authored on 28/10/2018 14:22:27... | ... |
@@ -15,7 +15,7 @@ from ..notebook_integration import ensure_holoviews |
15 | 15 |
from ..utils import cache_latest |
16 | 16 |
|
17 | 17 |
|
18 |
-def uniform_loss(interval, scale, function_values, neighbors): |
|
18 |
+def uniform_loss(interval, scale, data, neighbors): |
|
19 | 19 |
"""Loss function that samples the domain uniformly. |
20 | 20 |
|
21 | 21 |
Works with `~adaptive.Learner1D` only. |
... | ... |
@@ -36,7 +36,7 @@ def uniform_loss(interval, scale, function_values, neighbors): |
36 | 36 |
return dx |
37 | 37 |
|
38 | 38 |
|
39 |
-def default_loss(interval, scale, function_values, neighbors): |
|
39 |
+def default_loss(interval, scale, data, neighbors): |
|
40 | 40 |
"""Calculate loss on a single interval. |
41 | 41 |
|
42 | 42 |
Currently returns the rescaled length of the interval. If one of the |
... | ... |
@@ -44,7 +44,7 @@ def default_loss(interval, scale, function_values, neighbors): |
44 | 44 |
never touched. This behavior should be improved later. |
45 | 45 |
""" |
46 | 46 |
x_left, x_right = interval |
47 |
- y_right, y_left = function_values[x_right], function_values[x_left] |
|
47 |
+ y_right, y_left = data[x_right], data[x_left] |
|
48 | 48 |
x_scale, y_scale = scale |
49 | 49 |
dx = (x_right - x_left) / x_scale |
50 | 50 |
if y_scale == 0: |
... | ... |
@@ -70,7 +70,7 @@ def _loss_of_multi_interval(xs, ys): |
70 | 70 |
return sum(vol(pts[i:i+3]) for i in range(N)) / N |
71 | 71 |
|
72 | 72 |
|
73 |
-def triangle_loss(interval, scale, function_values, neighbors): |
|
73 |
+def triangle_loss(interval, scale, data, neighbors): |
|
74 | 74 |
x_left, x_right = interval |
75 | 75 |
xs = [neighbors[x_left][0], x_left, x_right, neighbors[x_right][1]] |
76 | 76 |
xs = [x for x in xs if x is not None] |
... | ... |
@@ -79,15 +79,15 @@ def triangle_loss(interval, scale, function_values, neighbors): |
79 | 79 |
return (x_right - x_left) / scale[0] |
80 | 80 |
else: |
81 | 81 |
y_scale = scale[1] or 1 |
82 |
- ys_scaled = [function_values[x] / y_scale for x in xs] |
|
82 |
+ ys_scaled = [data[x] / y_scale for x in xs] |
|
83 | 83 |
xs_scaled = [x / scale[0] for x in xs] |
84 | 84 |
return _loss_of_multi_interval(xs_scaled, ys_scaled) |
85 | 85 |
|
86 | 86 |
|
87 | 87 |
def get_curvature_loss(area_factor=1, euclid_factor=0.02, horizontal_factor=0.02): |
88 |
- def curvature_loss(interval, scale, function_values, neighbors): |
|
89 |
- triangle_loss_ = triangle_loss(interval, scale, function_values, neighbors) |
|
90 |
- default_loss_ = default_loss(interval, scale, function_values, neighbors) |
|
88 |
+ def curvature_loss(interval, scale, data, neighbors): |
|
89 |
+ triangle_loss_ = triangle_loss(interval, scale, data, neighbors) |
|
90 |
+ default_loss_ = default_loss(interval, scale, data, neighbors) |
|
91 | 91 |
dx = (interval[1] - interval[0]) / scale[0] |
92 | 92 |
return (area_factor * (triangle_loss_**0.5) |
93 | 93 |
+ euclid_factor * default_loss_ |
... | ... |
@@ -163,11 +163,13 @@ class Learner1D(BaseLearner): |
163 | 163 |
scale : (float, float) |
164 | 164 |
The x and y scale over all the intervals, useful for rescaling the |
165 | 165 |
interval loss. |
166 |
- function_values : dict(float → float) |
|
166 |
+ data : dict(float → float) |
|
167 | 167 |
A map containing evaluated function values. It is guaranteed |
168 | 168 |
to have values for both of the points in 'interval'. |
169 | 169 |
neighbors : dict(float → (float, float)) |
170 | 170 |
A map containing points as keys to its neighbors as a tuple. |
171 |
+ At the left ``x_left`` and right ``x_right`` most boundary it has |
|
172 |
+ ``x_left: (None, float)`` and ``x_right: (float, None)``. |
|
171 | 173 |
""" |
172 | 174 |
|
173 | 175 |
def __init__(self, function, bounds, loss_per_interval=None, nn_neighbors=0): |
... | ... |
@@ -60,8 +60,8 @@ simple (but naive) strategy is to *uniformly* sample the domain: |
60 | 60 |
|
61 | 61 |
.. jupyter-execute:: |
62 | 62 |
|
63 |
- def uniform_sampling_1d(interval, scale, function_values): |
|
64 |
- # Note that we never use 'function_values'; the loss is just the size of the subdomain |
|
63 |
+ def uniform_sampling_1d(interval, scale, data): |
|
64 |
+ # Note that we never use 'data'; the loss is just the size of the subdomain |
|
65 | 65 |
x_left, x_right = interval |
66 | 66 |
x_scale, _ = scale |
67 | 67 |
dx = (x_right - x_left) / x_scale |
... | ... |
@@ -559,8 +559,8 @@ |
559 | 559 |
"metadata": {}, |
560 | 560 |
"outputs": [], |
561 | 561 |
"source": [ |
562 |
- "def uniform_sampling_1d(interval, scale, function_values):\n", |
|
563 |
- " # Note that we never use 'function_values'; the loss is just the size of the subdomain\n", |
|
562 |
+ "def uniform_sampling_1d(interval, scale, data):\n", |
|
563 |
+ " # Note that we never use 'data'; the loss is just the size of the subdomain\n", |
|
564 | 564 |
" x_left, x_right = interval\n", |
565 | 565 |
" x_scale, _ = scale\n", |
566 | 566 |
" dx = (x_right - x_left) / x_scale\n", |