@@ -20,6 +20,7 @@
 from contextlib import contextmanager, suppress
 from datetime import datetime, timedelta, timezone
 from inspect import signature
+from itertools import chain
 from multiprocessing import Manager
 from pathlib import Path
 from typing import (
@@ -103,6 +104,7 @@ def split(seq: Iterable, n_parts: int) -> Iterable[tuple]:
         A list or other iterable that has to be split up.
     n_parts
         The sequence will be split up in this many parts.
+
     """
     lst = list(seq)
     n = math.ceil(len(lst) / n_parts)
@@ -131,14 +133,15 @@ def split_in_balancing_learners(
     Returns
     -------
     new_learners, new_fnames
+
     """
     new_learners = []
     new_fnames = []
     for x in split(zip(learners, fnames), n_parts):
         learners_part, fnames_part = zip(*x)
         learner = adaptive.BalancingLearner(learners_part, strategy=strategy)
         new_learners.append(learner)
-        new_fnames.append(fnames_part)
+        new_fnames.append(list(fnames_part))
     return new_learners, new_fnames
 
 
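For illustration only (not part of the diff, with hypothetical data): `zip(*x)` yields tuples, so the added `list(...)` call makes each entry of `new_fnames` a list instead of a tuple.

pairs = [("learner_a", "a.pickle"), ("learner_b", "b.pickle")]  # hypothetical (learner, fname) pairs
learners_part, fnames_part = zip(*pairs)
print(fnames_part)        # ('a.pickle', 'b.pickle') -- zip yields a tuple
print(list(fnames_part))  # ['a.pickle', 'b.pickle'] -- what the diff now appends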
@@ -169,6 +172,7 @@ def split_sequence_learner(
         List of `~adaptive.SequenceLearner`\s.
     new_fnames
         List of str based on a hash of the sequence.
+
     """
     new_learners, new_fnames = split_sequence_in_sequence_learners(
         function=big_learner._original_function,
@@ -214,6 +218,7 @@ def split_sequence_in_sequence_learners(
         List of `~adaptive.SequenceLearner`\s.
     new_fnames
         List of str based on a hash of the sequence.
+
     """
     folder = Path(folder)
     new_learners = []
@@ -248,11 +253,11 @@ def combine_sequence_learners(
     -------
     adaptive.SequenceLearner
         Big `~adaptive.SequenceLearner` with data from ``learners``.
+
     """
     if big_learner is None:
-        big_sequence: list[Any] = sum(
-            (list(learner.sequence) for learner in learners),
-            [],
+        big_sequence: list[Any] = list(
+            chain.from_iterable(learner.sequence for learner in learners),
         )
         big_learner = adaptive.SequenceLearner(
             learners[0]._original_function,
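For illustration only (not part of the diff): `sum(..., [])` and `chain.from_iterable` flatten a list of sequences into the same result, but `sum` re-copies the accumulator list on every addition (quadratic in total length), while `chain.from_iterable` makes a single linear pass. A minimal sketch with made-up sequences:

from itertools import chain

sequences = [[1, 2], [3, 4], [5]]  # hypothetical learner.sequence values
flat_sum = sum((list(s) for s in sequences), [])   # old approach
flat_chain = list(chain.from_iterable(sequences))  # new approach
assert flat_sum == flat_chain == [1, 2, 3, 4, 5]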
@@ -282,6 +287,7 @@ def copy_from_sequence_learner(
         Learner to take the data from.
     learner_to
         Learner to tell the data to.
+
     """
     mapping = {
         hash_anything(learner_from.sequence[i]): v for i, v in learner_from.data.items()
@@ -420,6 +426,7 @@ def _remove_or_move_files(
         If None the file is removed.
     desc
         Description of the progressbar.
+
     """
     n_failed = 0
     for fname in _progress(fnames, with_progress_bar, desc or "Removing files"):
@@ -463,6 +470,7 @@ def load_parallel(
     max_workers
         The maximum number of parallel threads when loading the data.
         If ``None``, use the maximum number of threads that is possible.
+
     """
 
     def load(learner: adaptive.BaseLearner, fname: str) -> None:
@@ -492,6 +500,7 @@ def save_parallel(
         A list of filenames corresponding to `learners`.
     with_progress_bar
         Display a progress bar using `tqdm`.
+
     """
 
     def save(learner: adaptive.BaseLearner, fname: str) -> None:
@@ -562,6 +571,7 @@ def connect_to_ipyparallel(
     -------
     client
         An IPyparallel client.
+
     """
     from ipyparallel import Client
 
@@ -623,6 +633,7 @@ class LRUCachedCallable:
         Cache size of the LRU cache, by default 128.
     with_cloudpickle
         Use cloudpickle for storing the data in memory.
+
     """
 
     def __init__(
@@ -1015,6 +1026,7 @@ def smart_goal(
     Returns
     -------
     Callable[[adaptive.BaseLearner], bool]
+
     """
     if callable(goal):
         return goal
@@ -1083,6 +1095,7 @@ class WrappedFunction:
     >>> wrapped_function = WrappedFunction(square)
     >>> wrapped_function(4)
     16
+
     """
 
     def __init__(
@@ -1132,6 +1145,7 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any:
         Any
             The result of calling the deserialized function with the provided
             arguments and keyword arguments.
+
         """
         global _GLOBAL_CACHE  # noqa: PLW0602
 
@@ -1258,6 +1272,7 @@ async def _track_file_creation_progress(
         The time interval (in seconds) at which to update the progress. The interval is dynamically
         adjusted to be at least 50 times the time it takes to update the progress. This ensures that
         updating the progress does not take up a significant amount of time.
+
     """
     # create total_files and add_total_progress before updating paths_dict
     total_files = sum(len(paths) for paths in paths_dict.values())
@@ -1348,6 +1363,7 @@ def track_file_creation_progress(
         "example2": {Path("/path/to/file3"), Path("/path/to/file4")},
     }
     >>> task = track_file_creation_progress(paths_dict)
+
     """
     get_console().clear_live()  # avoid LiveError, only 1 live render allowed at a time
     columns = (*Progress.get_default_columns(), TimeElapsedColumn())