@@ -243,7 +243,6 @@ def test_expected_loss_improvement_is_less_than_total_loss(learner_type, f, learner_kwargs):
     assert sum(loss_improvements) < learner.loss()
 
 
-@pytest.mark.xfail
 @run_with(Learner1D, Learner2D)
 def test_learner_subdomain(learner_type, f, learner_kwargs):
     """Learners that never receive data outside of a subdomain should
@@ -252,7 +251,6 @@ def test_learner_subdomain(learner_type, f, learner_kwargs):
     raise NotImplementedError()
 
 
-@pytest.mark.xfail
 @run_with(Learner1D, Learner2D)
 def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner_kwargs):
     """Learners behave identically under transformations that leave
@@ -265,7 +263,6 @@ def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner_kwargs):
     raise NotImplementedError()
 
 
-@pytest.mark.xfail
 @run_with(Learner1D, Learner2D)
 def test_convergence_for_arbitrary_ordering(learner_type, f, learner_kwargs):
     """Learners that are learning the same function should converge