Use sphinx-autodoc-typehints #174

Open · wants to merge 1 commit into base: main
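Every hunk in this diff applies the same pattern: the type (and default) annotations in the numpydoc ``name : type`` lines are dropped, because sphinx-autodoc-typehints renders that information from the Python type hints in the signatures instead. Below is a minimal configuration sketch of how the extension is typically enabled; the file path and option values are assumptions, since the docs configuration is not part of this diff:

```python
# docs/source/conf.py -- hypothetical sketch; this file is not shown in the diff.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",       # parse the numpydoc-style docstrings first
    "sphinx_autodoc_typehints",  # then inject types from the annotations
]

# Also render defaults, e.g. "int, default: 5", next to the injected type.
typehints_defaults = "comma"
```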
10 changes: 5 additions & 5 deletions adaptive_scheduler/_mock_scheduler.py
@@ -43,15 +43,15 @@ class MockScheduler:

Parameters
----------
- startup_delay : int
+ startup_delay
The waiting time before starting the process.
- max_running_jobs : int
+ max_running_jobs
Maximum number of simultaneously running jobs.
- refresh_interval : int
+ refresh_interval
Refresh interval for checking whether processes are still running.
- bash : str, default: "bash"
+ bash
``bash`` executable.
- url : str, optional
+ url
The URL of the socket. Defaults to {DEFAULT_URL}.
"""

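For reference, a construction sketch of `MockScheduler` using only the keywords documented above; the values are illustrative and assume the constructor takes exactly these parameters:

```python
# Hypothetical usage sketch; keywords taken from the docstring above.
from adaptive_scheduler._mock_scheduler import MockScheduler

mock = MockScheduler(
    startup_delay=2,      # wait 2 s before starting each process
    max_running_jobs=4,   # cap on simultaneously running jobs
    refresh_interval=1,   # check every second whether processes still run
    bash="bash",          # the ``bash`` executable
    url=None,             # fall back to DEFAULT_URL
)
```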
32 changes: 16 additions & 16 deletions adaptive_scheduler/_scheduler/base_scheduler.py
@@ -27,33 +27,33 @@ class BaseScheduler(abc.ABC):

Parameters
----------
- cores : int
+ cores
Number of cores per job (so per learner).
- python_executable : str, default: `sys.executable`
+ python_executable
The Python executable that should run adaptive-scheduler. By default
it uses the same Python as where this function is called.
- log_folder : str, default: ""
+ log_folder
The folder in which to put the log-files.
- mpiexec_executable : str, optional
+ mpiexec_executable
``mpiexec`` executable. By default `mpiexec` will be
used (so probably from ``conda``).
- executor_type : str
+ executor_type
The executor that is used; by default `mpi4py.futures.MPIPoolExecutor` is used.
One can use ``"ipyparallel"``, ``"dask-mpi"``, ``"mpi4py"``,
``"loky"``, or ``"process-pool"``.
- num_threads : int, default 1
+ num_threads
``MKL_NUM_THREADS``, ``OPENBLAS_NUM_THREADS``, ``OMP_NUM_THREADS``, and
``NUMEXPR_NUM_THREADS`` will be set to this number.
- extra_scheduler : list, optional
+ extra_scheduler
Extra ``#SLURM`` (depending on scheduler type)
arguments, e.g. ``["--exclusive=user", "--time=1"]``.
- extra_env_vars : list, optional
+ extra_env_vars
Extra environment variables that are exported in the job
script, e.g. ``["TMPDIR='/scratch'", "PYTHONPATH='my_dir:$PYTHONPATH'"]``.
- extra_script : str, optional
+ extra_script
Extra script that will be executed after any environment variables are set,
but before the main scheduler is run.
- batch_folder : str, default: ""
+ batch_folder
The folder in which to put the batch files.

Returns
@@ -99,12 +99,12 @@ def queue(self, *, me_only: bool = True) -> dict[str, dict]:

Parameters
----------
- me_only : bool, default: True
+ me_only
Only see your jobs.

Returns
-------
- queue : dict
+ queue
Mapping of ``job_id`` -> `dict` with ``name`` and ``state``, for
example ``{job_id: {"job_name": "TEST_JOB-1", "state": "R" or "Q"}}``.

@@ -135,7 +135,7 @@ def job_script(self, options: dict[str, Any]) -> str:

Returns
-------
- job_script : str
+ job_script
A job script that can be submitted to the scheduler.
"""

@@ -174,11 +174,11 @@ def cancel(

Parameters
----------
- job_names : list
+ job_names
List of job names.
- with_progress_bar : bool, default: True
+ with_progress_bar
Display a progress bar using `tqdm`.
- max_tries : int, default: 5
+ max_tries
Maximum number of attempts to cancel a job.
"""

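The rationale for all of these hunks is the same: with type hints in the signature, repeating the type in the docstring duplicates information that can drift out of sync. A before/after sketch of the convention, using the `cancel` parameters from the hunk above on an illustrative standalone function:

```python
# Before: type and default are duplicated between signature and docstring.
def cancel(job_names: list[str], *, max_tries: int = 5) -> None:
    """Cancel jobs.

    Parameters
    ----------
    job_names : list
        List of job names.
    max_tries : int, default: 5
        Maximum number of attempts to cancel a job.
    """

# After: sphinx-autodoc-typehints recovers ``list[str]`` and the default 5
# from the signature, so the docstring keeps only the descriptions.
def cancel(job_names: list[str], *, max_tries: int = 5) -> None:
    """Cancel jobs.

    Parameters
    ----------
    job_names
        List of job names.
    max_tries
        Maximum number of attempts to cancel a job.
    """
```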
2 changes: 1 addition & 1 deletion adaptive_scheduler/_scheduler/local.py
@@ -77,7 +77,7 @@ def job_script(self, options: dict[str, Any]) -> str:

Returns
-------
- job_script : str
+ job_script
A job script that can be submitted to PBS.

Notes
2 changes: 1 addition & 1 deletion adaptive_scheduler/_scheduler/pbs.py
@@ -128,7 +128,7 @@ def job_script(self, options: dict[str, Any]) -> str:

Returns
-------
- job_script : str
+ job_script
A job script that can be submitted to PBS.
"""
job_script = textwrap.dedent(
22 changes: 11 additions & 11 deletions adaptive_scheduler/_scheduler/slurm.py
@@ -23,38 +23,38 @@ class SLURM(BaseScheduler):

Parameters
----------
- cores : int | None
+ cores
Number of cores per job (so per learner).
Either use `cores` or `nodes` and `cores_per_node`.
- nodes : int | None
+ nodes
Number of nodes per job (so per learner).
Either `nodes` and `cores_per_node` or use `cores`.
cores_per_node: int | None
Number of cores per node.
Either `nodes` and `cores_per_node` or use `cores`.
partition: str | None
The SLURM partition to submit the job to.
- exclusive : bool
+ exclusive
Whether to use exclusive nodes (e.g., for SLURM it adds ``--exclusive`` as an option).
- log_folder : str, default: ""
+ log_folder
The folder in which to put the log-files.
- mpiexec_executable : str, optional
+ mpiexec_executable
``mpiexec`` executable. By default `mpiexec` will be
used (so probably from ``conda``).
- executor_type : str
+ executor_type
The executor that is used; by default `mpi4py.futures.MPIPoolExecutor` is used.
One can use ``"ipyparallel"``, ``"dask-mpi"``, ``"mpi4py"``,
``"loky"``, or ``"process-pool"``.
- num_threads : int, default 1
+ num_threads
``MKL_NUM_THREADS``, ``OPENBLAS_NUM_THREADS``, ``OMP_NUM_THREADS``, and
``NUMEXPR_NUM_THREADS`` will be set to this number.
- extra_scheduler : list, optional
+ extra_scheduler
Extra ``#SLURM`` (depending on scheduler type)
arguments, e.g. ``["--exclusive=user", "--time=1"]``.
- extra_env_vars : list, optional
+ extra_env_vars
Extra environment variables that are exported in the job
script, e.g. ``["TMPDIR='/scratch'", "PYTHONPATH='my_dir:$PYTHONPATH'"]``.
- extra_script : str, optional
+ extra_script
Extra script that will be executed after any environment variables are set,
but before the main scheduler is run.
"""
@@ -183,7 +183,7 @@ def job_script(self, options: dict[str, Any]) -> str:

Returns
-------
- job_script : str
+ job_script
A job script that can be submitted to SLURM.
"""
job_script = textwrap.dedent(
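A construction sketch for `SLURM` based on the parameters documented above; the import path follows the `~adaptive_scheduler.scheduler.BaseScheduler` references in these docstrings, and all values are illustrative:

```python
# Hypothetical sketch; keywords taken from the SLURM docstring above.
from adaptive_scheduler.scheduler import SLURM

scheduler = SLURM(
    cores=8,                              # or nodes= together with cores_per_node=
    partition="compute",                  # SLURM partition to submit to
    exclusive=False,                      # do not add ``--exclusive``
    executor_type="process-pool",         # instead of the default MPIPoolExecutor
    num_threads=1,                        # MKL/OPENBLAS/OMP/NUMEXPR thread caps
    extra_scheduler=["--time=01:00:00"],  # extra scheduler arguments
)
print(scheduler.job_script(options={}))   # signature as shown in the hunk above
```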
16 changes: 8 additions & 8 deletions adaptive_scheduler/_server_support/common.py
@@ -44,7 +44,7 @@ def get_allowed_url() -> str:

Returns
-------
- url : str
+ url
A URL that can be used for the database manager, with the format
``tcp://ip_of_this_machine:allowed_port.``.
"""
@@ -87,16 +87,16 @@ def cleanup_scheduler_files(

Parameters
----------
- job_names : list
+ job_names
List of job names.
- scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+ scheduler
A scheduler instance from `adaptive_scheduler.scheduler`.
- with_progress_bar : bool, default: True
+ with_progress_bar
Display a progress bar using `tqdm`.
- move_to : str, default: None
+ move_to
Move the file to a different directory.
If None the file is removed.
- log_file_folder : str, default: ''
+ log_file_folder
The folder in which to delete the log-files.
"""
to_rm = _get_all_files(job_names, scheduler)
@@ -154,9 +154,9 @@ def periodically_clean_ipython_profiles(

Parameters
----------
- scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+ scheduler
A scheduler instance from `adaptive_scheduler.scheduler`.
- interval : int, default: 600
+ interval
The interval at which to remove old profiles.

Returns
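A usage sketch for `cleanup_scheduler_files`, importing from the private module path shown in this diff; `scheduler` is the instance from the SLURM sketch above and the remaining values are illustrative:

```python
# Hypothetical sketch; keywords taken from the docstring above.
from adaptive_scheduler._server_support.common import cleanup_scheduler_files

cleanup_scheduler_files(
    job_names=["TEST_JOB-1", "TEST_JOB-2"],
    scheduler=scheduler,     # a BaseScheduler instance
    with_progress_bar=True,  # show a tqdm progress bar
    move_to="old_logs",      # move files here instead of deleting (None deletes)
)
```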
12 changes: 6 additions & 6 deletions adaptive_scheduler/_server_support/database_manager.py
@@ -129,19 +129,19 @@ class DatabaseManager(BaseManager):

Parameters
----------
- url : str
+ url
The URL of the database manager, with the format
``tcp://ip_of_this_machine:allowed_port.``. Use `get_allowed_url`
to get a `url` that will work.
- scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+ scheduler
A scheduler instance from `adaptive_scheduler.scheduler`.
- db_fname : str
+ db_fname
Filename of the database, e.g. 'running.json'.
- learners : list of `adaptive.BaseLearner` isinstances
+ learners
List of `learners` corresponding to `fnames`.
- fnames : list
+ fnames
List of `fnames` corresponding to `learners`.
- overwrite_db : bool, default: True
+ overwrite_db
Overwrite the existing database upon starting.

Attributes
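A construction sketch for `DatabaseManager`, again using only the documented keywords; the `adaptive.Learner1D` learners and filenames are illustrative placeholders:

```python
# Hypothetical sketch; keywords taken from the docstring above.
import adaptive

from adaptive_scheduler._server_support.common import get_allowed_url
from adaptive_scheduler._server_support.database_manager import DatabaseManager

learners = [adaptive.Learner1D(lambda x: x**2, bounds=(-1, 1)) for _ in range(2)]
fnames = [f"data/learner_{i}.pickle" for i in range(2)]

db_manager = DatabaseManager(
    url=get_allowed_url(),   # tcp://ip_of_this_machine:allowed_port
    scheduler=scheduler,     # from the SLURM sketch above
    db_fname="running.json",
    learners=learners,
    fnames=fnames,
    overwrite_db=True,
)
```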
42 changes: 21 additions & 21 deletions adaptive_scheduler/_server_support/job_manager.py
@@ -36,28 +36,28 @@ def command_line_options(

Parameters
----------
- scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+ scheduler
A scheduler instance from `adaptive_scheduler.scheduler`.
database_manager
A database manager instance.
- runner_kwargs : dict, default: None
+ runner_kwargs
Extra keyword arguments to pass to the `adaptive.Runner`. Note that this dict
will be serialized and pasted in the ``job_script``.
- goal : callable, default: None
+ goal
The goal passed to the `adaptive.Runner`. Note that this function will
be serialized and pasted in the ``job_script``. Can be a smart-goal
that accepts
``Callable[[adaptive.BaseLearner], bool] | int | float | datetime | timedelta | None``.
See `adaptive_scheduler.utils.smart_goal` for more information.
- log_interval : int, default: 300
+ log_interval
Time in seconds between log entries.
- save_interval : int, default: 300
+ save_interval
Time in seconds between saving of the learners.
- save_dataframe : bool
+ save_dataframe
Whether to periodically save the learner's data as a `pandas.DataFrame`.
- dataframe_format : str
+ dataframe_format
The format in which to save the `pandas.DataFrame`. See the type hint for the options.
- loky_start_method : str
+ loky_start_method
Loky start method, by default "loky".

Returns
@@ -94,40 +94,40 @@ class JobManager(BaseManager):

Parameters
----------
- job_names : list
+ job_names
List of unique names used for the jobs with the same length as
`learners`. Note that a job name does not correspond to a specific learner.
- database_manager : `DatabaseManager`
+ database_manager
A `DatabaseManager` instance.
- scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+ scheduler
A scheduler instance from `adaptive_scheduler.scheduler`.
- interval : int, default: 30
+ interval
Time in seconds between checking and starting jobs.
- max_simultaneous_jobs : int, default: 500
+ max_simultaneous_jobs
Maximum number of simultaneously running jobs. By default no more than 500
jobs will be running. Keep in mind that if you do not specify a ``runner.goal``,
jobs will run forever, so the jobs that were not initially started
(because of this `max_simultaneous_jobs` condition) will never start.
- max_fails_per_job : int, default: 40
+ max_fails_per_job
Maximum number of times that a job can fail. This is here as a fail switch
since a job might fail instantly because of a bug inside your code.
The job manager will stop when
``n_jobs * total_number_of_jobs_failed > max_fails_per_job`` is true.
- save_dataframe : bool
+ save_dataframe
Whether to periodically save the learner's data as a `pandas.DataFrame`.
- dataframe_format : str
+ dataframe_format
The format in which to save the `pandas.DataFrame`. See the type hint for the options.
- loky_start_method : str
+ loky_start_method
Loky start method, by default "loky".
- log_interval : int, default: 300
+ log_interval
Time in seconds between log entries.
- save_interval : int, default: 300
+ save_interval
Time in seconds between saving of the learners.
- runner_kwargs : dict, default: None
+ runner_kwargs
Extra keyword arguments to pass to the `adaptive.Runner`. Note that this dict
will be serialized and pasted in the ``job_script``.
- goal : callable, default: None
+ goal
The goal passed to the `adaptive.Runner`. Note that this function will
be serialized and pasted in the ``job_script``. Can be a smart-goal
that accepts
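A construction sketch for `JobManager` tying together the pieces from the earlier sketches; keyword names come from the docstring above and the values are illustrative:

```python
# Hypothetical sketch; keywords taken from the JobManager docstring above.
from adaptive_scheduler._server_support.job_manager import JobManager

job_manager = JobManager(
    job_names=[f"TEST_JOB-{i}" for i in range(len(learners))],
    database_manager=db_manager,  # from the DatabaseManager sketch above
    scheduler=scheduler,          # from the SLURM sketch above
    interval=30,                  # seconds between checking/starting jobs
    max_simultaneous_jobs=500,
    max_fails_per_job=40,
    log_interval=300,             # seconds between log entries
    save_interval=300,            # seconds between saving the learners
    goal=0.01,                    # smart-goal: run until the loss drops below 0.01
)
```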
18 changes: 9 additions & 9 deletions adaptive_scheduler/_server_support/kill_manager.py
@@ -28,17 +28,17 @@ def logs_with_string_or_condition(

Parameters
----------
- error : str or callable
+ error
String that is searched for or callable that is applied
to the log text. Must take a single argument, a list of
strings, and return True if the job has to be killed, or
False if not.
- database_manager : `DatabaseManager`
+ database_manager
A `DatabaseManager` instance.

Returns
-------
- has_string : dict
+ has_string
A list of ``(job_name, fnames)`` pairs that have the string inside their log-file.
"""
if isinstance(error, str):
@@ -73,21 +73,21 @@ class KillManager(BaseManager):

Parameters
----------
- scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
+ scheduler
A scheduler instance from `adaptive_scheduler.scheduler`.
- database_manager : `DatabaseManager`
+ database_manager
A `DatabaseManager` instance.
- error : str or callable, default: "srun: error:"
+ error
If ``error`` is a string and is found in the log files, the job will
be cancelled and restarted. If it is a callable, it is applied
to the log text. Must take a single argument, a list of
strings, and return True if the job has to be killed, or
False if not.
- interval : int, default: 600
+ interval
Time in seconds between checking for the condition.
- max_cancel_tries : int, default: 5
+ max_cancel_tries
Try maximum `max_cancel_tries` times to cancel a job.
- move_to : str, optional
+ move_to
If a job is cancelled the log is either removed (if ``move_to=None``)
or moved to a folder (e.g. if ``move_to='old_logs'``).
"""
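Finally, a construction sketch for `KillManager` with the documented keywords; as above, the values are illustrative:

```python
# Hypothetical sketch; keywords taken from the KillManager docstring above.
from adaptive_scheduler._server_support.kill_manager import KillManager

kill_manager = KillManager(
    scheduler=scheduler,          # from the SLURM sketch above
    database_manager=db_manager,  # from the DatabaseManager sketch above
    error="srun: error:",         # cancel and restart jobs whose logs contain this
    interval=600,                 # seconds between checks
    max_cancel_tries=5,
    move_to="old_logs",           # keep cancelled-job logs instead of deleting
)
```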