Skip to content

Commit 2ae6dfa

Browse files
committed
test: fix flaky test_request_queue_unlock_requests
1 parent 9cf3794 commit 2ae6dfa

1 file changed

Lines changed: 28 additions & 14 deletions

File tree

tests/integration/test_request_queue.py

Lines changed: 28 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -4,18 +4,18 @@
44

55
from typing import TYPE_CHECKING, cast
66

7+
from apify_client._models import LockedRequestQueueHead, RequestQueueHead
8+
79
if TYPE_CHECKING:
810
from apify_client import ApifyClient, ApifyClientAsync
911
from apify_client._models import (
1012
BatchAddResult,
1113
BatchDeleteResult,
1214
ListOfRequestQueues,
1315
ListOfRequests,
14-
LockedRequestQueueHead,
1516
Request,
1617
RequestLockInfo,
1718
RequestQueue,
18-
RequestQueueHead,
1919
RequestRegistration,
2020
UnlockRequestsResult,
2121
)
@@ -421,16 +421,23 @@ async def test_request_queue_list_and_lock_head(client: ApifyClient | ApifyClien
421421
rq_client.add_request({'url': f'https://example.com/lock-{i}', 'unique_key': f'lock-{i}'})
422422
)
423423

424-
# Poll until requests are available for locking (eventual consistency)
425-
lock_response: LockedRequestQueueHead | None = None
424+
# Poll (without side effects) until all requests are visible. Using `list_and_lock_head` for polling
425+
# would lock items across iterations, leading to an ambiguous count of actually-locked requests.
426+
head_response: RequestQueueHead | None = None
426427
for _ in range(5):
427428
await maybe_sleep(1, is_async=is_async)
428-
result = await maybe_await(rq_client.list_and_lock_head(limit=3, lock_duration=timedelta(seconds=60)))
429-
lock_response = cast('LockedRequestQueueHead', result)
430-
if len(lock_response.items) == 3:
429+
result = await maybe_await(rq_client.list_head(limit=5))
430+
assert isinstance(result, RequestQueueHead)
431+
head_response = result
432+
if len(head_response.items) == 5:
431433
break
432434

433-
assert lock_response is not None
435+
assert head_response is not None
436+
assert len(head_response.items) == 5
437+
438+
result = await maybe_await(rq_client.list_and_lock_head(limit=3, lock_duration=timedelta(seconds=60)))
439+
assert isinstance(result, LockedRequestQueueHead)
440+
lock_response = result
434441
assert len(lock_response.items) == 3
435442

436443
# Verify requests are locked
@@ -530,16 +537,23 @@ async def test_request_queue_unlock_requests(client: ApifyClient | ApifyClientAs
530537
rq_client.add_request({'url': f'https://example.com/unlock-{i}', 'unique_key': f'unlock-{i}'})
531538
)
532539

533-
# Poll until requests are available for locking (eventual consistency)
534-
lock_response: LockedRequestQueueHead | None = None
540+
# Poll (without side effects) until all requests are visible. Using `list_and_lock_head` for polling
541+
# would lock items across iterations, leading to an ambiguous count of actually-locked requests.
542+
head_response: RequestQueueHead | None = None
535543
for _ in range(5):
536544
await maybe_sleep(1, is_async=is_async)
537-
result = await maybe_await(rq_client.list_and_lock_head(limit=3, lock_duration=timedelta(seconds=60)))
538-
lock_response = cast('LockedRequestQueueHead', result)
539-
if len(lock_response.items) == 3:
545+
result = await maybe_await(rq_client.list_head(limit=5))
546+
assert isinstance(result, RequestQueueHead)
547+
head_response = result
548+
if len(head_response.items) == 5:
540549
break
541550

542-
assert lock_response is not None
551+
assert head_response is not None
552+
assert len(head_response.items) == 5
553+
554+
result = await maybe_await(rq_client.list_and_lock_head(limit=3, lock_duration=timedelta(seconds=60)))
555+
assert isinstance(result, LockedRequestQueueHead)
556+
lock_response = result
543557
assert len(lock_response.items) == 3
544558

545559
# Unlock all requests

0 commit comments

Comments (0)