|
23 | 23 | "outputs": [],
|
24 | 24 | "source": [
|
25 | 25 | "import numpy as np\n",
|
| 26 | + "\n", |
26 | 27 | "import adaptive_scheduler\n",
|
27 |
| - "import random\n", |
| 28 | + "\n", |
28 | 29 | "\n",
|
29 | 30 | "def h(x, width=0.01, offset=0):\n",
|
30 | 31 | " for _ in range(10): # Burn some CPU time just because\n",
|
31 | 32 | " np.linalg.eig(np.random.rand(1000, 1000))\n",
|
32 |
| - " return x + width ** 2 / (width ** 2 + (x - offset) ** 2)\n", |
| 33 | + " return x + width**2 / (width**2 + (x - offset) ** 2)\n", |
| 34 | + "\n", |
33 | 35 | "\n",
|
34 | 36 | "# Define the sequence/samples we want to run\n",
|
35 | 37 | "xs = np.linspace(0, 1, 10_000)\n",
|
36 | 38 | "\n",
|
37 | 39 | "# ⚠️ Here a `learner` is an `adaptive` concept, read it as `jobs`.\n",
|
38 | 40 | "# ⚠️ `fnames` are the result locations\n",
|
39 | 41 | "learners, fnames = adaptive_scheduler.utils.split_sequence_in_sequence_learners(\n",
|
40 |
| - " h, xs, n_learners=10\n", |
| 42 | + " h,\n", |
| 43 | + " xs,\n", |
| 44 | + " n_learners=10,\n", |
41 | 45 | ")\n",
|
42 | 46 | "\n",
|
43 | 47 | "run_manager = adaptive_scheduler.slurm_run(\n",
|
|
48 | 52 | " nodes=1, # number of nodes per `learner`\n",
|
49 | 53 | " cores_per_node=1, # number of cores on 1 node per `learner`\n",
|
50 | 54 | " log_interval=5, # how often to produce a log message\n",
|
51 |
| - " save_interval=5, # how often to save the results\n", |
| 55 | + " save_interval=5, # how often to save the results\n", |
52 | 56 | ")\n",
|
53 | 57 | "run_manager.start()"
|
54 | 58 | ]
|
|
85 | 89 | "from functools import partial\n",
|
86 | 90 | "\n",
|
87 | 91 | "import adaptive\n",
|
| 92 | + "\n", |
88 | 93 | "import adaptive_scheduler\n",
|
89 | 94 | "\n",
|
90 | 95 | "\n",
|
91 | 96 | "def h(x, width=0.01, offset=0):\n",
|
92 | 97 | " import numpy as np\n",
|
93 |
| - " import random\n", |
94 | 98 | "\n",
|
95 | 99 | " for _ in range(10): # Burn some CPU time just because\n",
|
96 | 100 | " np.linalg.eig(np.random.rand(1000, 1000))\n",
|
97 | 101 | "\n",
|
98 | 102 | " a = width\n",
|
99 |
| - " return x + a ** 2 / (a ** 2 + (x - offset) ** 2)\n", |
| 103 | + " return x + a**2 / (a**2 + (x - offset) ** 2)\n", |
100 | 104 | "\n",
|
101 | 105 | "\n",
|
102 | 106 | "offsets = [i / 10 - 0.5 for i in range(5)]\n",
|
|
266 | 270 | "outputs": [],
|
267 | 271 | "source": [
|
268 | 272 | "import numpy as np\n",
|
269 |
| - "\n", |
270 | 273 | "from adaptive import SequenceLearner\n",
|
271 |
| - "from adaptive_scheduler.utils import split, combo_to_fname\n", |
| 274 | + "\n", |
| 275 | + "from adaptive_scheduler.utils import split\n", |
272 | 276 | "\n",
|
273 | 277 | "\n",
|
274 | 278 | "def g(xyz):\n",
|
275 | 279 | " x, y, z = xyz\n",
|
276 | 280 | " for _ in range(5): # Burn some CPU time just because\n",
|
277 | 281 | " np.linalg.eig(np.random.rand(1000, 1000))\n",
|
278 |
| - " return x ** 2 + y ** 2 + z ** 2\n", |
| 282 | + " return x**2 + y**2 + z**2\n", |
279 | 283 | "\n",
|
280 | 284 | "\n",
|
281 | 285 | "xs = np.linspace(0, 10, 11)\n",
|
|
302 | 306 | "\n",
|
303 | 307 | "\n",
|
304 | 308 | "scheduler = adaptive_scheduler.scheduler.DefaultScheduler(\n",
|
305 |
| - " cores=10, executor_type=\"ipyparallel\",\n", |
| 309 | + " cores=10,\n", |
| 310 | + " executor_type=\"ipyparallel\",\n", |
306 | 311 | ") # PBS or SLURM\n",
|
307 | 312 | "\n",
|
308 | 313 | "run_manager2 = adaptive_scheduler.server_support.RunManager(\n",
|
309 |
| - " scheduler, learners, fnames, goal=goal, log_interval=30, save_interval=30,\n", |
| 314 | + " scheduler,\n", |
| 315 | + " learners,\n", |
| 316 | + " fnames,\n", |
| 317 | + " goal=goal,\n", |
| 318 | + " log_interval=30,\n", |
| 319 | + " save_interval=30,\n", |
310 | 320 | ")\n",
|
311 | 321 | "run_manager2.start()"
|
312 | 322 | ]
|
|
343 | 353 | "outputs": [],
|
344 | 354 | "source": [
|
345 | 355 | "import numpy as np\n",
|
346 |
| - "\n", |
347 | 356 | "from adaptive import SequenceLearner\n",
|
348 |
| - "from adaptive_scheduler.utils import split, combo2fname\n", |
349 | 357 | "from adaptive.utils import named_product\n",
|
350 | 358 | "\n",
|
| 359 | + "from adaptive_scheduler.utils import combo2fname\n", |
| 360 | + "\n", |
351 | 361 | "\n",
|
352 | 362 | "def g(combo):\n",
|
353 | 363 | " x, y, z = combo[\"x\"], combo[\"y\"], combo[\"z\"]\n",
|
354 | 364 | "\n",
|
355 | 365 | " for _ in range(5): # Burn some CPU time just because\n",
|
356 | 366 | " np.linalg.eig(np.random.rand(1000, 1000))\n",
|
357 | 367 | "\n",
|
358 |
| - " return x ** 2 + y ** 2 + z ** 2\n", |
| 368 | + " return x**2 + y**2 + z**2\n", |
359 | 369 | "\n",
|
360 | 370 | "\n",
|
361 | 371 | "combos = named_product(x=np.linspace(0, 10), y=np.linspace(-1, 1), z=np.linspace(-3, 3))\n",
|
|
364 | 374 | "\n",
|
365 | 375 | "# We could run this as 1 job with N nodes, but we can also split it up in multiple jobs.\n",
|
366 | 376 | "# This is desirable when you don't want to run a single job with 300 nodes for example.\n",
|
367 |
| - "# Note that \n", |
| 377 | + "# Note that\n", |
368 | 378 | "# `adaptive_scheduler.utils.split_sequence_in_sequence_learners(g, combos, 100, \"data\")`\n",
|
369 | 379 | "# does the same!\n",
|
370 | 380 | "\n",
|
371 | 381 | "njobs = 100\n",
|
372 | 382 | "split_combos = list(split(combos, njobs))\n",
|
373 | 383 | "\n",
|
374 | 384 | "print(\n",
|
375 |
| - " f\"Length of split_combos: {len(split_combos)} and length of split_combos[0]: {len(split_combos[0])}.\"\n", |
| 385 | + " f\"Length of split_combos: {len(split_combos)} and length of split_combos[0]: {len(split_combos[0])}.\",\n", |
376 | 386 | ")\n",
|
377 | 387 | "\n",
|
378 | 388 | "learners = [SequenceLearner(g, combos_part) for combos_part in split_combos]\n",
|
|
393 | 403 | "outputs": [],
|
394 | 404 | "source": [
|
395 | 405 | "from functools import partial\n",
|
| 406 | + "\n", |
396 | 407 | "import adaptive_scheduler\n",
|
397 |
| - "from adaptive_scheduler.scheduler import DefaultScheduler, PBS, SLURM\n", |
| 408 | + "from adaptive_scheduler.scheduler import SLURM, DefaultScheduler\n", |
398 | 409 | "\n",
|
399 | 410 | "\n",
|
400 | 411 | "def goal(learner):\n",
|
401 | 412 | " return learner.done() # the standard goal for a SequenceLearner\n",
|
402 | 413 | "\n",
|
403 | 414 | "\n",
|
404 |
| - "extra_scheduler = (\n", |
405 |
| - " [\"--exclusive\", \"--time=24:00:00\"] if DefaultScheduler is SLURM else []\n", |
406 |
| - ")\n", |
| 415 | + "extra_scheduler = [\"--exclusive\", \"--time=24:00:00\"] if DefaultScheduler is SLURM else []\n", |
407 | 416 | "\n",
|
408 | 417 | "scheduler = adaptive_scheduler.scheduler.DefaultScheduler(\n",
|
409 | 418 | " cores=10,\n",
|
|
459 | 468 | "source": [
|
460 | 469 | "run_manager3.load_learners() # load the data into the learners\n",
|
461 | 470 | "result = sum(\n",
|
462 |
| - " [l.result() for l in learners], []\n", |
| 471 | + " [l.result() for l in learners],\n", |
| 472 | + " [],\n", |
463 | 473 | ") # combine all learner's result into 1 list"
|
464 | 474 | ]
|
465 | 475 | }
|
|
0 commit comments