@@ -115,7 +115,7 @@
 "source": [
 "# The end condition is when the \"loss\" is less than 0.1. In the context of the\n",
 "# 1D learner this means that we will resolve features in 'func' with width 0.1 or wider.\n",
-"runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.05)\n",
+"runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)\n",
 "runner.live_info()"
 ]
 },
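
To see what tightening the goal from 0.05 to 0.01 amounts to outside the notebook, a minimal sketch is below; it is not part of the diff, the test function `f` is illustrative, and `adaptive.runner.simple` is used only because it blocks until the goal is met, so the script finishes on its own:

```python
# Rough sketch: how tightening the loss goal changes how many points
# a Learner1D samples.  `f` is an illustrative function, not the notebook's.
import adaptive
import numpy as np

def f(x):
    return np.exp(-x**2) + 0.1 * np.sin(20 * x)

for target in (0.05, 0.01):
    learner = adaptive.Learner1D(f, bounds=(-1, 1))
    # runner.simple runs synchronously until the goal is met,
    # unlike adaptive.Runner, which runs in the background.
    adaptive.runner.simple(learner, goal=lambda l: l.loss() < target)
    print(f"loss < {target}: {learner.npoints} points sampled")
```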
@@ -475,7 +475,7 @@
 "    return x + z**2 + np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2/a**4)\n",
 "\n",
 "learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])\n",
-"runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.01)\n",
+"runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.001)\n",
 "runner.live_info()"
 ]
 },
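
For the LearnerND case a similar standalone sketch is below. Only the `return` line of `sphere` appears in this diff, so the argument unpacking and the value of `a` are assumptions, and reaching loss < 0.001 in 3D can require a large number of points:

```python
# Rough sketch of the modified cell as a blocking script.  The body of
# `sphere` other than its return line is assumed, not taken from the notebook.
import adaptive
import numpy as np

def sphere(xyz, a=0.4):  # value of `a` is illustrative only
    x, y, z = xyz
    return x + z**2 + np.exp(-(x**2 + y**2 + z**2 - 0.75**2)**2/a**4)

learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])
# The tighter goal (0.001 instead of 0.01) makes the learner sample the
# sharp spherical-shell feature more densely; runner.simple blocks until done.
adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.001)
print(learner.npoints, "points sampled")
```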