
Commit f69c90d

Use sphinx-autodoc-typehints
1 parent f041182 commit f69c90d

16 files changed, +183 −183 lines changed
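The change is mechanical: every NumPy-style docstring loses its ``name : type, default: ...`` annotations, because sphinx-autodoc-typehints generates that information from the signatures' type hints instead. For context, a minimal sketch of the kind of Sphinx ``conf.py`` such a setup assumes (the project's actual configuration is not part of this commit):

# conf.py -- hypothetical minimal configuration, not taken from this commit.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",       # parses the NumPy-style docstrings
    "sphinx_autodoc_typehints",  # listed after napoleon; injects types from annotations
]

# Render parameter types even when the docstring entry gives only a description.
always_document_param_types = True

With this in place, the ``: int``-style annotations removed below would have been duplicated in the rendered docs.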

adaptive_scheduler/_mock_scheduler.py

+5 −5
@@ -43,15 +43,15 @@ class MockScheduler:
     Parameters
     ----------
-    startup_delay : int
+    startup_delay
         The waiting time before starting the process.
-    max_running_jobs : int
+    max_running_jobs
         Maximum number of simultaneously running jobs.
-    refresh_interval : int
+    refresh_interval
         Refresh interval of checking whether processes are still running.
-    bash : str, default: "bash"
+    bash
         ``bash`` executable.
-    url : str, optional
+    url
         The URL of the socket. Defaults to {DEFAULT_URL}.
     """
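The same before/after pattern repeats in every file below: the type lives only in the signature, where sphinx-autodoc-typehints picks it up. A sketch of the resulting style (hypothetical function, not from this repository):

def start_process(startup_delay: int = 2, bash: str = "bash") -> None:
    """Start a mock process after a delay.

    Parameters
    ----------
    startup_delay
        The waiting time before starting the process.
    bash
        ``bash`` executable.
    """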

adaptive_scheduler/_scheduler/base_scheduler.py

+16 −16
@@ -27,33 +27,33 @@ class BaseScheduler(abc.ABC):
     Parameters
     ----------
-    cores : int
+    cores
         Number of cores per job (so per learner).
-    python_executable : str, default: `sys.executable`
+    python_executable
         The Python executable that should run adaptive-scheduler. By default
         it uses the same Python as where this function is called.
-    log_folder : str, default: ""
+    log_folder
         The folder in which to put the log-files.
-    mpiexec_executable : str, optional
+    mpiexec_executable
         ``mpiexec`` executable. By default `mpiexec` will be
         used (so probably from ``conda``).
-    executor_type : str
+    executor_type
         The executor that is used, by default `mpi4py.futures.MPIPoolExecutor` is used.
         One can use ``"ipyparallel"``, ``"dask-mpi"``, ``"mpi4py"``,
         ``"loky"``, or ``"process-pool"``.
-    num_threads : int, default 1
+    num_threads
         ``MKL_NUM_THREADS``, ``OPENBLAS_NUM_THREADS``, ``OMP_NUM_THREADS``, and
         ``NUMEXPR_NUM_THREADS`` will be set to this number.
-    extra_scheduler : list, optional
+    extra_scheduler
         Extra ``#SLURM`` (depending on scheduler type)
         arguments, e.g. ``["--exclusive=user", "--time=1"]``.
-    extra_env_vars : list, optional
+    extra_env_vars
         Extra environment variables that are exported in the job
         script, e.g. ``["TMPDIR='/scratch'", "PYTHONPATH='my_dir:$PYTHONPATH'"]``.
-    extra_script : str, optional
+    extra_script
         Extra script that will be executed after any environment variables are set,
         but before the main scheduler is run.
-    batch_folder : str, default: ""
+    batch_folder
         The folder in which to put the batch files.

Returns
@@ -99,12 +99,12 @@ def queue(self, *, me_only: bool = True) -> dict[str, dict]:
         Parameters
         ----------
-        me_only : bool, default: True
+        me_only
             Only see your jobs.

         Returns
         -------
-        queue : dict
+        queue
             Mapping of ``job_id`` -> `dict` with ``name`` and ``state``, for
             example ``{job_id: {"job_name": "TEST_JOB-1", "state": "R" or "Q"}}``.
@@ -135,7 +135,7 @@ def job_script(self, options: dict[str, Any]) -> str:
         Returns
         -------
-        job_script : str
+        job_script
             A job script that can be submitted to the scheduler.
         """

@@ -174,11 +174,11 @@ def cancel(
         Parameters
         ----------
-        job_names : list
+        job_names
             List of job names.
-        with_progress_bar : bool, default: True
+        with_progress_bar
             Display a progress bar using `tqdm`.
-        max_tries : int, default: 5
+        max_tries
             Maximum number of attempts to cancel a job.
         """
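The ``queue`` mapping documented above is plain data, so callers can filter it directly. A usage sketch, assuming ``scheduler`` is any concrete ``BaseScheduler`` instance:

# Count running vs. queued jobs from the documented mapping shape.
q = scheduler.queue(me_only=True)  # {job_id: {"job_name": ..., "state": "R" or "Q"}}
running = [job_id for job_id, info in q.items() if info["state"] == "R"]
print(f"{len(running)} of {len(q)} jobs running")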

adaptive_scheduler/_scheduler/local.py

+1 −1
@@ -77,7 +77,7 @@ def job_script(self, options: dict[str, Any]) -> str:
         Returns
         -------
-        job_script : str
+        job_script
             A job script that can be submitted to PBS.

         Notes

adaptive_scheduler/_scheduler/pbs.py

+1 −1
@@ -128,7 +128,7 @@ def job_script(self, options: dict[str, Any]) -> str:
         Returns
         -------
-        job_script : str
+        job_script
             A job script that can be submitted to PBS.
         """
         job_script = textwrap.dedent(

adaptive_scheduler/_scheduler/slurm.py

+11 −11
@@ -23,38 +23,38 @@ class SLURM(BaseScheduler):
     Parameters
     ----------
-    cores : int | None
+    cores
         Number of cores per job (so per learner).
         Either use `cores` or `nodes` and `cores_per_node`.
-    nodes : int | None
+    nodes
         Number of nodes per job (so per learner).
         Either `nodes` and `cores_per_node` or use `cores`.
     cores_per_node: int | None
         Number of cores per node.
         Either `nodes` and `cores_per_node` or use `cores`.
     partition: str | None
         The SLURM partition to submit the job to.
-    exclusive : bool
+    exclusive
         Whether to use exclusive nodes (e.g., with SLURM it adds ``--exclusive`` as an option).
-    log_folder : str, default: ""
+    log_folder
         The folder in which to put the log-files.
-    mpiexec_executable : str, optional
+    mpiexec_executable
         ``mpiexec`` executable. By default `mpiexec` will be
         used (so probably from ``conda``).
-    executor_type : str
+    executor_type
         The executor that is used, by default `mpi4py.futures.MPIPoolExecutor` is used.
         One can use ``"ipyparallel"``, ``"dask-mpi"``, ``"mpi4py"``,
         ``"loky"``, or ``"process-pool"``.
-    num_threads : int, default 1
+    num_threads
         ``MKL_NUM_THREADS``, ``OPENBLAS_NUM_THREADS``, ``OMP_NUM_THREADS``, and
         ``NUMEXPR_NUM_THREADS`` will be set to this number.
-    extra_scheduler : list, optional
+    extra_scheduler
         Extra ``#SLURM`` (depending on scheduler type)
         arguments, e.g. ``["--exclusive=user", "--time=1"]``.
-    extra_env_vars : list, optional
+    extra_env_vars
         Extra environment variables that are exported in the job
         script, e.g. ``["TMPDIR='/scratch'", "PYTHONPATH='my_dir:$PYTHONPATH'"]``.
-    extra_script : str, optional
+    extra_script
         Extra script that will be executed after any environment variables are set,
         but before the main scheduler is run.
     """
@@ -183,7 +183,7 @@ def job_script(self, options: dict[str, Any]) -> str:
         Returns
         -------
-        job_script : str
+        job_script
             A job script that can be submitted to SLURM.
         """
         job_script = textwrap.dedent(
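A hedged instantiation sketch for the SLURM parameters above (values are illustrative; per the docstring, pass either ``cores`` or ``nodes`` plus ``cores_per_node``):

from adaptive_scheduler.scheduler import SLURM  # module path taken from the docstrings above

scheduler = SLURM(
    cores=4,
    partition="mypartition",       # hypothetical partition name
    executor_type="process-pool",  # one of the documented executor options
    extra_scheduler=["--time=1"],  # extra scheduler arguments, as documented
)
# Render the submission script; an empty options dict is assumed here for illustration.
print(scheduler.job_script(options={}))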

adaptive_scheduler/_server_support/common.py

+8 −8
@@ -44,7 +44,7 @@ def get_allowed_url() -> str:
     Returns
     -------
-    url : str
+    url
         A URL that can be used for the database manager, with the format
         ``tcp://ip_of_this_machine:allowed_port.``.
     """
@@ -87,16 +87,16 @@ def cleanup_scheduler_files(
     Parameters
     ----------
-    job_names : list
+    job_names
         List of job names.
-    scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+    scheduler
         A scheduler instance from `adaptive_scheduler.scheduler`.
-    with_progress_bar : bool, default: True
+    with_progress_bar
         Display a progress bar using `tqdm`.
-    move_to : str, default: None
+    move_to
         Move the file to a different directory.
         If None the file is removed.
-    log_file_folder : str, default: ''
+    log_file_folder
         The folder in which to delete the log-files.
     """
     to_rm = _get_all_files(job_names, scheduler)
@@ -154,9 +154,9 @@ def periodically_clean_ipython_profiles(
     Parameters
     ----------
-    scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+    scheduler
         A scheduler instance from `adaptive_scheduler.scheduler`.
-    interval : int, default: 600
+    interval
         The interval at which to remove old profiles.

     Returns
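A usage sketch for ``cleanup_scheduler_files`` with the parameters documented above (``scheduler`` and the job names are placeholders):

cleanup_scheduler_files(
    job_names=["TEST_JOB-1", "TEST_JOB-2"],
    scheduler=scheduler,
    with_progress_bar=True,
    move_to="old_logs",  # or None to delete the files instead of moving them
)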

adaptive_scheduler/_server_support/database_manager.py

+6 −6
@@ -129,19 +129,19 @@ class DatabaseManager(BaseManager):
     Parameters
     ----------
-    url : str
+    url
         The url of the database manager, with the format
         ``tcp://ip_of_this_machine:allowed_port.``. Use `get_allowed_url`
         to get a `url` that will work.
-    scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+    scheduler
         A scheduler instance from `adaptive_scheduler.scheduler`.
-    db_fname : str
+    db_fname
         Filename of the database, e.g. 'running.json'.
-    learners : list of `adaptive.BaseLearner` isinstances
+    learners
         List of `learners` corresponding to `fnames`.
-    fnames : list
+    fnames
         List of `fnames` corresponding to `learners`.
-    overwrite_db : bool, default: True
+    overwrite_db
         Overwrite the existing database upon starting.

     Attributes
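A construction sketch tying the DatabaseManager parameters together (import paths are guessed from the file names in this diff; the package may re-export these elsewhere, and ``scheduler``, ``learners``, and ``fnames`` are placeholders):

from adaptive_scheduler._server_support.common import get_allowed_url
from adaptive_scheduler._server_support.database_manager import DatabaseManager

db_manager = DatabaseManager(
    url=get_allowed_url(),    # tcp://ip_of_this_machine:allowed_port
    scheduler=scheduler,      # any BaseScheduler instance, e.g. SLURM(...)
    db_fname="running.json",
    learners=learners,        # list of adaptive.BaseLearner instances
    fnames=fnames,            # one filename per learner
    overwrite_db=True,
)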

adaptive_scheduler/_server_support/job_manager.py

+21 −21
@@ -36,28 +36,28 @@ def command_line_options(
     Parameters
     ----------
-    scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+    scheduler
         A scheduler instance from `adaptive_scheduler.scheduler`.
     database_manager
         A database manager instance.
-    runner_kwargs : dict, default: None
+    runner_kwargs
         Extra keyword arguments to pass to the `adaptive.Runner`. Note that this dict
         will be serialized and pasted in the ``job_script``.
-    goal : callable, default: None
+    goal
         The goal passed to the `adaptive.Runner`. Note that this function will
         be serialized and pasted in the ``job_script``. Can be a smart-goal
         that accepts
         ``Callable[[adaptive.BaseLearner], bool] | int | float | datetime | timedelta | None``.
         See `adaptive_scheduler.utils.smart_goal` for more information.
-    log_interval : int, default: 300
+    log_interval
         Time in seconds between log entries.
-    save_interval : int, default: 300
+    save_interval
         Time in seconds between saving of the learners.
-    save_dataframe : bool
+    save_dataframe
         Whether to periodically save the learner's data as a `pandas.DataFrame`.
-    dataframe_format : str
+    dataframe_format
         The format in which to save the `pandas.DataFrame`. See the type hint for the options.
-    loky_start_method : str
+    loky_start_method
         Loky start method, by default "loky".

Returns
@@ -94,40 +94,40 @@ class JobManager(BaseManager):
     Parameters
     ----------
-    job_names : list
+    job_names
         List of unique names used for the jobs with the same length as
         `learners`. Note that a job name does not correspond to a
         specific learner.
-    database_manager : `DatabaseManager`
+    database_manager
         A `DatabaseManager` instance.
-    scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+    scheduler
         A scheduler instance from `adaptive_scheduler.scheduler`.
-    interval : int, default: 30
+    interval
         Time in seconds between checking and starting jobs.
-    max_simultaneous_jobs : int, default: 500
+    max_simultaneous_jobs
         Maximum number of simultaneously running jobs. By default no more than 500
         jobs will be running. Keep in mind that if you do not specify a ``runner.goal``,
         jobs will run forever, so the jobs that were not initially started
         (because of this `max_simultaneous_jobs` condition) will never start.
-    max_fails_per_job : int, default: 40
+    max_fails_per_job
         Maximum number of times that a job can fail. This is here as a fail switch
         because a job might fail instantly because of a bug inside your code.
         The job manager will stop when
         ``n_jobs * total_number_of_jobs_failed > max_fails_per_job`` is true.
-    save_dataframe : bool
+    save_dataframe
         Whether to periodically save the learner's data as a `pandas.DataFrame`.
-    dataframe_format : str
+    dataframe_format
         The format in which to save the `pandas.DataFrame`. See the type hint for the options.
-    loky_start_method : str
+    loky_start_method
         Loky start method, by default "loky".
-    log_interval : int, default: 300
+    log_interval
         Time in seconds between log entries.
-    save_interval : int, default: 300
+    save_interval
         Time in seconds between saving of the learners.
-    runner_kwargs : dict, default: None
+    runner_kwargs
         Extra keyword arguments to pass to the `adaptive.Runner`. Note that this dict
         will be serialized and pasted in the ``job_script``.
-    goal : callable, default: None
+    goal
         The goal passed to the `adaptive.Runner`. Note that this function will
         be serialized and pasted in the ``job_script``. Can be a smart-goal
         that accepts
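The ``goal`` parameter above accepts several forms via `adaptive_scheduler.utils.smart_goal`. A sketch of the documented type union (the int/float interpretations in the comments are my reading, not stated in this diff):

from datetime import datetime, timedelta

goal = lambda learner: learner.npoints >= 1_000  # Callable[[adaptive.BaseLearner], bool]
goal = 1_000                  # int: presumably a number-of-points target
goal = 0.01                   # float: presumably a loss target
goal = timedelta(hours=2)     # run for a duration
goal = datetime(2030, 1, 1)   # run until a point in time
goal = None                   # no goal; jobs run until stopped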

adaptive_scheduler/_server_support/kill_manager.py

+9 −9
@@ -28,17 +28,17 @@ def logs_with_string_or_condition(
     Parameters
     ----------
-    error : str or callable
+    error
         String that is searched for or callable that is applied
         to the log text. Must take a single argument, a list of
         strings, and return True if the job has to be killed, or
         False if not.
-    database_manager : `DatabaseManager`
+    database_manager
         A `DatabaseManager` instance.

     Returns
     -------
-    has_string : dict
+    has_string
         A list of ``(job_name, fnames)`` pairs whose log files contain the string.
     """
     if isinstance(error, str):
@@ -73,21 +73,21 @@ class KillManager(BaseManager):
     Parameters
     ----------
-    scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+    scheduler
         A scheduler instance from `adaptive_scheduler.scheduler`.
-    database_manager : `DatabaseManager`
+    database_manager
         A `DatabaseManager` instance.
-    error : str or callable, default: "srun: error:"
+    error
         If ``error`` is a string and is found in the log files, the job will
         be cancelled and restarted. If it is a callable, it is applied
         to the log text. Must take a single argument, a list of
         strings, and return True if the job has to be killed, or
         False if not.
-    interval : int, default: 600
+    interval
         Time in seconds between checking for the condition.
-    max_cancel_tries : int, default: 5
+    max_cancel_tries
         Try a maximum of `max_cancel_tries` times to cancel a job.
-    move_to : str, optional
+    move_to
         If a job is cancelled the log is either removed (if ``move_to=None``)
         or moved to a folder (e.g. if ``move_to='old_logs'``).
     """
