Commit fee0703

[pre-commit.ci] pre-commit autoupdate (#188)
* [pre-commit.ci] pre-commit autoupdate

  updates:
  - [github.com/astral-sh/ruff-pre-commit: v0.1.3 → v0.2.1](astral-sh/ruff-pre-commit@v0.1.3...v0.2.1)
  - [github.com/pre-commit/mirrors-mypy: v1.6.1 → v1.8.0](pre-commit/mirrors-mypy@v1.6.1...v1.8.0)

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

* Fix type
* format
* fix

---------

Co-authored-by: Bas Nijholt <[email protected]>
1 parent b013122 · commit fee0703

15 files changed: +50 −10 lines

.pre-commit-config.yaml (+2 −2)

@@ -9,14 +9,14 @@ repos:
       - id: debug-statements
       - id: check-ast
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.1.3"
+    rev: "v0.2.1"
     hooks:
       - id: ruff
         exclude: docs/source/conf.py|ipynb_filter.py
         args: ["--fix"]
       - id: ruff-format
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: "v1.6.1"
+    rev: "v1.8.0"
     hooks:
       - id: mypy
         exclude: ipynb_filter.py|docs/source/conf.py
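
Bumping ruff from v0.1.3 to v0.2.1 is what produces nearly every other hunk in this commit: the newer ruff's auto-fixes insert a blank line between the last section of a numpydoc-style docstring and its closing quotes, which appears to correspond to pydocstyle rule D413 ("missing blank line after last section"), picked up because this project selects all rules (`select = ["ALL"]`). A minimal sketch of the enforced shape, using a hypothetical function:

def double(x: float) -> float:
    """Double ``x``.

    Parameters
    ----------
    x
        The value to double.

    """  # <- the auto-fix inserts the blank line just above this closing quote
    return 2 * x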

adaptive_scheduler/_mock_scheduler.py (+1)

@@ -53,6 +53,7 @@ class MockScheduler:
         ``bash`` executable.
     url
         The URL of the socket. Defaults to {DEFAULT_URL}.
+
     """

     def __init__(

adaptive_scheduler/_scheduler/base_scheduler.py (+4)

@@ -59,6 +59,7 @@ class BaseScheduler(abc.ABC):
     Returns
     -------
     `BaseScheduler` object.
+
     """

     _ext: ClassVar[str]
@@ -113,6 +114,7 @@ def queue(self, *, me_only: bool = True) -> dict[str, dict]:
         -----
         This function might return extra information about the job, however
         this is not used elsewhere in this package.
+
         """

     def queue_df(self) -> pd.DataFrame:
@@ -141,6 +143,7 @@ def job_script(self, options: dict[str, Any], *, index: int | None = None) -> str:
         index
             The index of the job that is being run. This is used when
             specifying different resources for different jobs.
+
         """

     @property
@@ -188,6 +191,7 @@ def cancel(
             Display a progress bar using `tqdm`.
         max_tries
             Maximum number of attempts to cancel a job.
+
         """

         def cancel_jobs(job_ids: list[str]) -> None:

adaptive_scheduler/_scheduler/local.py (+1)

@@ -88,6 +88,7 @@ def job_script(self, options: dict[str, Any], *, index: int | None = None) -> str:
         Currently, there is a problem that this will not properly cleanup.
         for example `ipengine ... &` will be detached and go on,
         normally a scheduler will take care of this.
+
         """
         job_script = textwrap.dedent(
             """\

adaptive_scheduler/_scheduler/pbs.py (+1)

@@ -135,6 +135,7 @@ def job_script(self, options: dict[str, Any], *, index: int | None = None) -> str:
             The index of the job that is being run. This is used when
             specifying different resources for different jobs.
             Currently not implemented for PBS!
+
         """
         job_script = textwrap.dedent(
             f"""\

adaptive_scheduler/_scheduler/slurm.py (+2)

@@ -101,6 +101,7 @@ class SLURM(BaseScheduler):
     extra_script
         Extra script that will be executed after any environment variables are set,
         but before the main scheduler is run.
+
     """

     # Attributes that all schedulers need to have
@@ -297,6 +298,7 @@ def job_script(self, options: dict[str, Any], *, index: int | None = None) -> str:
         index
             The index of the job that is being run. This is used when
             specifying different resources for different jobs.
+
         """
         cores = self._get_cores(index=index)
         job_script = textwrap.dedent(

adaptive_scheduler/_server_support/common.py (+3)

@@ -47,6 +47,7 @@ def get_allowed_url() -> str:
     url
         An url that can be used for the database manager, with the format
         ``tcp://ip_of_this_machine:allowed_port.``.
+
     """
     ip = socket.gethostbyname(socket.gethostname())
     port = zmq.ssh.tunnel.select_random_ports(1)[0]
@@ -98,6 +99,7 @@ def cleanup_scheduler_files(
         If None the file is removed.
     log_file_folder
         The folder in which to delete the log-files.
+
     """
     to_rm = _get_all_files(job_names, scheduler)

@@ -162,6 +164,7 @@ def periodically_clean_ipython_profiles(
     Returns
     -------
     asyncio.Task
+
     """

     async def clean(interval: float) -> None:

adaptive_scheduler/_server_support/database_manager.py (+3 −1)

@@ -84,7 +84,7 @@ def __init__(self, db_fname: str | Path, *, clear_existing: bool = False) -> None:
             raw_data = json.load(f)
         self._data = [_DBEntry(**entry) for entry in raw_data["data"]]

-    def all(self) -> list[_DBEntry]:  # noqa: A003
+    def all(self) -> list[_DBEntry]:
         return self._data

     def insert_multiple(self, entries: list[_DBEntry]) -> None:
@@ -152,6 +152,7 @@ class DatabaseManager(BaseManager):
     ----------
     failed : list
         A list of entries that have failed and have been removed from the database.
+
     """

     def __init__(
@@ -363,6 +364,7 @@ async def _manage(self) -> None:
         Returns
         -------
         coroutine
+
         """
         log.debug("started database")
         socket = ctx.socket(zmq.REP)
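
The dropped `# noqa: A003` above silenced flake8-builtins' warning about a method shadowing the `all` builtin. The updated ruff evidently no longer reports A003 for this method, and since the project enables RUF100 (which flags unused `noqa` directives), the stale suppression itself became an error and had to be removed. A hypothetical sketch of the pattern:

class _Database:
    """Hypothetical container mirroring the `all` method above."""

    def __init__(self) -> None:
        self._entries: list[dict] = []

    def all(self) -> list[dict]:
        # A method named like the `all` builtin: older ruff flagged this as
        # A003; newer ruff accepts it, so a leftover `# noqa: A003` here
        # would now trip RUF100 (unused noqa).
        return self._entries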

adaptive_scheduler/_server_support/job_manager.py (+2)

@@ -65,6 +65,7 @@ def command_line_options(
     -------
     dict
         The command line options for the job_script.
+
     """
     if runner_kwargs is None:
         runner_kwargs = {}
@@ -142,6 +143,7 @@ class JobManager(BaseManager):
     ----------
     n_started : int
         Total number of jobs started by the `JobManager`.
+
     """

     def __init__(

adaptive_scheduler/_server_support/kill_manager.py (+2)

@@ -40,6 +40,7 @@ def logs_with_string_or_condition(
     -------
     has_string
         A list ``(job_name, fnames)``, which have the string inside their log-file.
+
     """
     if isinstance(error, str):
         has_error = lambda lines: error in "".join(lines)  # noqa: E731
@@ -90,6 +91,7 @@ class KillManager(BaseManager):
     move_to
         If a job is cancelled the log is either removed (if ``move_to=None``)
         or moved to a folder (e.g. if ``move_to='old_logs'``).
+
     """

     def __init__(

adaptive_scheduler/_server_support/parse_logs.py (+1)

@@ -52,6 +52,7 @@ def parse_log_files(
     Returns
     -------
     `~pandas.core.frame.DataFrame`
+
     """
     _queue = scheduler.queue()
     database_manager.update(_queue)

adaptive_scheduler/client_support.py (+3)

@@ -77,6 +77,7 @@ def get_learner(
         The filename of the learner that was chosen.
     initializer
         A function that runs before the process is forked.
+
     """
     _add_log_file_handler(log_fname)
     log.info(
@@ -120,6 +121,7 @@ def tell_done(url: str, fname: str | list[str]) -> None:
         (`adaptive_scheduler.server_support.manage_database`).
     fname
         The filename of the learner that is done.
+
     """
     log.info("goal reached! 🎉🎊🥳")
     with ctx.socket(zmq.REQ) as socket:
@@ -173,6 +175,7 @@ def log_info(runner: AsyncRunner, interval: float = 300) -> asyncio.Task:
         Adaptive Runner instance.
     interval
         Time in seconds between log entries.
+
     """

     async def coro(runner: AsyncRunner, interval: float) -> None:

adaptive_scheduler/utils.py (+20 −4)

@@ -20,6 +20,7 @@
 from contextlib import contextmanager, suppress
 from datetime import datetime, timedelta, timezone
 from inspect import signature
+from itertools import chain
 from multiprocessing import Manager
 from pathlib import Path
 from typing import (
@@ -103,6 +104,7 @@ def split(seq: Iterable, n_parts: int) -> Iterable[tuple]:
         A list or other iterable that has to be split up.
     n_parts
         The sequence will be split up in this many parts.
+
     """
     lst = list(seq)
     n = math.ceil(len(lst) / n_parts)
@@ -131,14 +133,15 @@ def split_in_balancing_learners(
     Returns
     -------
     new_learners, new_fnames
+
     """
     new_learners = []
     new_fnames = []
     for x in split(zip(learners, fnames), n_parts):
         learners_part, fnames_part = zip(*x)
         learner = adaptive.BalancingLearner(learners_part, strategy=strategy)
         new_learners.append(learner)
-        new_fnames.append(fnames_part)
+        new_fnames.append(list(fnames_part))
     return new_learners, new_fnames
@@ -169,6 +172,7 @@ def split_sequence_learner(
         List of `~adaptive.SequenceLearner`\s.
     new_fnames
         List of str based on a hash of the sequence.
+
     """
     new_learners, new_fnames = split_sequence_in_sequence_learners(
         function=big_learner._original_function,
@@ -214,6 +218,7 @@ def split_sequence_in_sequence_learners(
         List of `~adaptive.SequenceLearner`\s.
     new_fnames
         List of str based on a hash of the sequence.
+
     """
     folder = Path(folder)
     new_learners = []
@@ -248,11 +253,11 @@ def combine_sequence_learners(
     -------
     adaptive.SequenceLearner
         Big `~adaptive.SequenceLearner` with data from ``learners``.
+
     """
     if big_learner is None:
-        big_sequence: list[Any] = sum(
-            (list(learner.sequence) for learner in learners),
-            [],
+        big_sequence: list[Any] = list(
+            chain.from_iterable(learner.sequence for learner in learners),
         )
         big_learner = adaptive.SequenceLearner(
             learners[0]._original_function,
@@ -282,6 +287,7 @@ def copy_from_sequence_learner(
         Learner to take the data from.
     learner_to
         Learner to tell the data to.
+
     """
     mapping = {
         hash_anything(learner_from.sequence[i]): v for i, v in learner_from.data.items()
@@ -420,6 +426,7 @@ def _remove_or_move_files(
         If None the file is removed.
     desc
         Description of the progressbar.
+
     """
     n_failed = 0
     for fname in _progress(fnames, with_progress_bar, desc or "Removing files"):
@@ -463,6 +470,7 @@ def load_parallel(
     max_workers
         The maximum number of parallel threads when loading the data.
         If ``None``, use the maximum number of threads that is possible.
+
     """

     def load(learner: adaptive.BaseLearner, fname: str) -> None:
@@ -492,6 +500,7 @@ def save_parallel(
         A list of filenames corresponding to `learners`.
     with_progress_bar
         Display a progress bar using `tqdm`.
+
     """

     def save(learner: adaptive.BaseLearner, fname: str) -> None:
@@ -562,6 +571,7 @@ def connect_to_ipyparallel(
     -------
     client
         An IPyparallel client.
+
     """
     from ipyparallel import Client

@@ -623,6 +633,7 @@ class LRUCachedCallable:
         Cache size of the LRU cache, by default 128.
     with_cloudpickle
         Use cloudpickle for storing the data in memory.
+
     """

     def __init__(
@@ -1015,6 +1026,7 @@ def smart_goal(
     Returns
     -------
     Callable[[adaptive.BaseLearner], bool]
+
     """
     if callable(goal):
         return goal
@@ -1083,6 +1095,7 @@ class WrappedFunction:
     >>> wrapped_function = WrappedFunction(square)
    >>> wrapped_function(4)
    16
+
    """

    def __init__(
@@ -1132,6 +1145,7 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any:
        Any
            The result of calling the deserialized function with the provided
            arguments and keyword arguments.
+
        """
        global _GLOBAL_CACHE  # noqa: PLW0602

@@ -1258,6 +1272,7 @@ async def _track_file_creation_progress(
        The time interval (in seconds) at which to update the progress. The interval is dynamically
        adjusted to be at least 50 times the time it takes to update the progress. This ensures that
        updating the progress does not take up a significant amount of time.
+
    """
    # create total_files and add_total_progress before updating paths_dict
    total_files = sum(len(paths) for paths in paths_dict.values())
@@ -1348,6 +1363,7 @@ def track_file_creation_progress(
        "example2": {Path("/path/to/file3"), Path("/path/to/file4")},
    }
    >>> task = track_file_creation_progress(paths_dict)
+
    """
    get_console().clear_live()  # avoid LiveError, only 1 live render allowed at a time
    columns = (*Progress.get_default_columns(), TimeElapsedColumn())
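
Two of the `utils.py` hunks change behavior rather than formatting: `split_in_balancing_learners` now returns lists of filenames instead of tuples (the test update below follows from this), and `combine_sequence_learners` flattens the sequences with `itertools.chain.from_iterable` instead of `sum(..., [])`. The latter is presumably driven by ruff's RUF017 check (quadratic list summation): `sum` over lists recopies the growing accumulator on every `+`, which is O(n^2), while chaining is a single O(n) pass. A self-contained comparison:

from itertools import chain

sequences = [[1, 2], [3, 4], [5]]

# O(n^2): each `+` allocates a new list and recopies all previous items.
flat_slow = sum((list(seq) for seq in sequences), [])

# O(n): lazily yields the items of each sequence exactly once.
flat_fast = list(chain.from_iterable(sequences))

assert flat_slow == flat_fast == [1, 2, 3, 4, 5]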

pyproject.toml (+4 −2)

@@ -112,6 +112,8 @@ exclude_lines = [
 [tool.ruff]
 line-length = 88
 target-version = "py38"
+
+[tool.ruff.lint]
 select = ["ALL"]
 ignore = [
     "T20", # flake8-print
@@ -132,12 +134,12 @@ ignore = [
     "E501", # Line too long
 ]

-[tool.ruff.per-file-ignores]
+[tool.ruff.lint.per-file-ignores]
 "tests/*" = ["SLF001", "PLR2004"]
 "tests/test_examples.py" = ["E501"]
 ".github/*" = ["INP001"]

-[tool.ruff.mccabe]
+[tool.ruff.lint.mccabe]
 max-complexity = 18

 [tool.mypy]
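
Ruff 0.2.0 moved linter-specific settings into the dedicated `[tool.ruff.lint]` namespace and deprecated the old top-level spellings, which is why `select`/`ignore` and the `per-file-ignores` and `mccabe` tables move under it, while `line-length` and `target-version` stay at the top level because they also apply to the formatter.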

tests/test_utils.py (+1 −1)

@@ -60,7 +60,7 @@ def test_split_in_balancing_learners(
     )
     assert len(new_learners) == n_parts
     assert all(isinstance(lrn, adaptive.BalancingLearner) for lrn in new_learners)
-    assert new_fnames == [(fnames[0],), (fnames[1],)]
+    assert new_fnames == [[fnames[0]], [fnames[1]]]


 def test_split_sequence_learner() -> None:
