
Commit 5431401

BatchJob.start_and_wait: add require_success argument
related to ESA-APEx/apex_algorithms#163
1 parent 5067c28

3 files changed (+31 −1 lines)

CHANGELOG.md

Lines changed: 2 additions & 0 deletions

@@ -12,10 +12,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - `openeo.testing.io.TestDataLoader`: unit test utility to compactly load (and optionally preprocess) test data (text/JSON/...)
 - `openeo.Connection`: automatically retry API requests on `429 Too Many Requests` HTTP errors, with appropriate delay if possible ([#441](https://github.com/Open-EO/openeo-python-client/issues/441))
 - Introduced `pixel_tolerance` argument in `openeo.testing.results` helpers to specify the ignorable fraction of significantly differing pixels. ([#776](https://github.com/Open-EO/openeo-python-client/issues/776))
+- `BatchJob.start_and_wait()`: add `require_success` argument (on by default) to control whether an exception should be raised automatically on job failure.
 
 ### Changed
 
 - `DataCube.apply_dimension()`: not explicitly specifying the `dimension` argument is deprecated and will trigger warnings ([#774](https://github.com/Open-EO/openeo-python-client/issues/774))
+- `BatchJob.start_and_wait()`: all arguments must be specified as keyword arguments, to eliminate the risk of positional mix-ups between its heterogeneous arguments and flags.
 
 ### Removed

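The `require_success` entry above changes the failure contract of `BatchJob.start_and_wait()`. A minimal usage sketch (the backend URL and collection id are placeholders, and the `JobFailedException` import path from `openeo.rest` is assumed):

import openeo
from openeo.rest import JobFailedException  # import path assumed

connection = openeo.connect("https://openeo.example.com")  # placeholder backend URL
job = connection.load_collection("S2").create_job()

# Default behavior (require_success=True): a failed job raises JobFailedException.
try:
    job.start_and_wait()
except JobFailedException:
    print(job.logs())

# Opt-out behavior (on a fresh job): the call returns normally even on failure,
# leaving status inspection and log handling to the caller.
job.start_and_wait(require_success=False)
if job.status() != "finished":
    print(job.logs())
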
openeo/rest/job.py

Lines changed: 11 additions & 1 deletion

@@ -273,11 +273,13 @@ def run_synchronous(
 
     def start_and_wait(
         self,
+        *,
         print=print,
         max_poll_interval: float = DEFAULT_JOB_STATUS_POLL_INTERVAL_MAX,
         connection_retry_interval: float = DEFAULT_JOB_STATUS_POLL_CONNECTION_RETRY_INTERVAL,
         soft_error_max: int = DEFAULT_JOB_STATUS_POLL_SOFT_ERROR_MAX,
         show_error_logs: bool = True,
+        require_success: bool = True,
     ) -> BatchJob:
         """
         Start the batch job, poll its status and wait till it finishes (or fails)
@@ -287,11 +289,19 @@ def start_and_wait(
         :param connection_retry_interval: how long to wait when status poll failed due to connection issue
         :param soft_error_max: maximum number of soft errors (e.g. temporary connection glitches) to allow
         :param show_error_logs: whether to automatically print error logs when the batch job failed.
+        :param require_success: whether to raise an exception if the job did not finish successfully.
 
         :return: Handle to the job created at the backend.
 
         .. versionchanged:: 0.37.0
             Added argument ``show_error_logs``.
+
+        .. versionchanged:: 0.42.0
+            All arguments must be specified as keyword arguments,
+            to eliminate the risk of positional mix-ups between heterogeneous arguments and flags.
+
+        .. versionchanged:: 0.42.0
+            Added argument ``require_success``.
         """
         # TODO rename `connection_retry_interval` to something more generic?
         start_time = time.time()
@@ -354,7 +364,7 @@ def soft_error(message: str):
             time.sleep(poll_interval)
             poll_interval = min(1.25 * poll_interval, max_poll_interval)
 
-        if status != "finished":
+        if require_success and status != "finished":
             # TODO: render logs jupyter-aware in a notebook context?
             if show_error_logs:
                 print(f"Your batch job {self.job_id!r} failed. Error logs:")

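The keyword-only marker (`*`) added to the signature means positional calls now fail fast instead of silently binding a flag to the wrong parameter. A small sketch of the effect, reusing the `job` handle from the sketch above:

# Positional arguments are rejected since the `*` marker was added:
try:
    job.start_and_wait(False)
except TypeError as exc:
    print(exc)  # e.g. "start_and_wait() takes 1 positional argument but 2 were given"

# Every option has to be spelled out by name:
job.start_and_wait(show_error_logs=False, require_success=False)
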
tests/rest/test_job.py

Lines changed: 18 additions & 0 deletions

@@ -3,6 +3,7 @@
 import json
 import logging
 import re
+from contextlib import nullcontext
 from pathlib import Path
 from typing import Optional
 from unittest import mock
@@ -321,6 +322,23 @@ def test_execute_batch_with_excessive_soft_errors(con100, requests_mock, tmpdir,
     ]
 
 
+@pytest.mark.parametrize(
+    ["require_success", "expectation"],
+    [
+        (True, pytest.raises(JobFailedException, match="'job-000' didn't finish successfully")),
+        (False, nullcontext()),
+    ],
+)
+def test_start_and_wait_with_error_require_success(dummy_backend, require_success, expectation):
+    dummy_backend.setup_simple_job_status_flow(queued=0, running=1, final="error")
+    cube = dummy_backend.connection.load_collection("S2").save_result(format="GTiff")
+    job = cube.create_job()
+    assert job.status() == "created"
+    with expectation, fake_time():
+        job.start_and_wait(require_success=require_success)
+    assert job.status() == "error"
+
+
 @httpretty.activate(allow_net_connect=False)
 @pytest.mark.parametrize(
     ["retry_config", "extra_responses", "expectation_context", "expected_sleeps"],

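The new test relies on a common pytest pattern: the expected-exception context manager is itself parametrized, with `nullcontext()` as a no-op stand-in for the non-raising case, so a single test body covers both branches. A self-contained toy illustration of the pattern (all names invented for the example):

from contextlib import nullcontext

import pytest


def might_fail(fail: bool):
    if fail:
        raise RuntimeError("boom")


@pytest.mark.parametrize(
    ["fail", "expectation"],
    [
        (True, pytest.raises(RuntimeError, match="boom")),
        (False, nullcontext()),
    ],
)
def test_might_fail(fail, expectation):
    # The same `with` block asserts the exception when one is expected,
    # and does nothing when none is.
    with expectation:
        might_fail(fail)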