|
35 | 35 | LOG_LEVEL_ERROR = 40 |
36 | 36 | LOG_LEVEL_CRITICAL = 50 |
37 | 37 |
|
38 | | -# Wait time (seconds) for AsyncLogger.close() best-effort flush |
39 | | -_ASYNC_CLOSE_WAIT_SECS = 0.05 |
40 | | - |
41 | 38 | # Thread-local storage for lock-free operation |
42 | 39 | _thread_local = threading.local() |
43 | 40 |
|
|
47 | 44 | _async_shutdown = threading.Event() |
48 | 45 |
|
49 | 46 |
|
| 47 | +class _FlushSignal: |
| 48 | + """Queue item used to signal a synchronous flush barrier.""" |
| 49 | + |
| 50 | + __slots__ = ("event",) |
| 51 | + |
| 52 | + def __init__(self, event: threading.Event): |
| 53 | + self.event = event |
| 54 | + |
| 55 | + |
50 | 56 |
|
51 | 57 |
|
52 | 58 |
|
def _async_worker_thread():
    """Background worker for async logging.

    Drains ``_async_queue`` in batches of up to ``batch_size`` items and
    hands each batch to ``_process_async_batch``.  Three item kinds flow
    through the queue:

    * a log record        -> batched and written
    * a ``_FlushSignal``  -> its event is set once every record dequeued
                             *before* it has been written
    * ``None``            -> shutdown sentinel; the worker exits after
                             finishing the current batch

    ``task_done()`` is called exactly once per dequeued item — even when
    the sink raises — so ``_async_queue.join()`` during shutdown cannot
    deadlock on a lost acknowledgement.
    """
    batch = []
    batch_size = 50  # Optimal batch size for throughput/latency balance

    while True:
        try:
            # Collect batch
            batch.clear()
            flush_signals = []        # _FlushSignal markers seen this cycle
            shutdown_requested = False
            pending_count = 0         # records in `batch` still owing task_done()

            try:
                # Get first item (blocking, 100ms batch timeout)
                item = _async_queue.get(timeout=0.1)
                if item is None:  # Shutdown signal
                    _async_queue.task_done()
                    break
                if isinstance(item, _FlushSignal):
                    # No records precede it in this cycle (the previous
                    # batch was fully processed last iteration), so it can
                    # be released immediately.
                    item.event.set()
                    _async_queue.task_done()
                    continue

                batch.append(item)
                pending_count = 1

                # Collect additional items (non-blocking)
                for _ in range(batch_size - 1):
                    try:
                        item = _async_queue.get_nowait()
                    except queue.Empty:
                        break
                    if item is None:  # Shutdown signal
                        _async_queue.task_done()
                        shutdown_requested = True
                        break
                    if isinstance(item, _FlushSignal):
                        # BUGFIX: do NOT set the event here — records already
                        # in `batch` precede this marker and have not been
                        # written yet.  Defer the release until after the
                        # batch is processed so flush() really waits.
                        flush_signals.append(item)
                        continue
                    batch.append(item)
                    pending_count += 1

            except queue.Empty:
                continue

            # Process batch
            if batch:
                try:
                    _process_async_batch(batch)
                except Exception:
                    # A failing sink must not wedge the worker, leak
                    # task_done() calls, or strand flush() waiters.
                    pass

            # Acknowledge every dequeued record, then release flush waiters
            # now that everything ahead of them has been handled.
            for _ in range(pending_count):
                _async_queue.task_done()
            for signal in flush_signals:
                signal.event.set()
                _async_queue.task_done()

            if shutdown_requested:
                break

        except Exception:
            pass  # Never let the background worker die on an error
90 | 117 |
|
@@ -255,12 +282,8 @@ def __init__(self, name: str, min_level: int = LOG_LEVEL_INFO): |
255 | 282 | _ensure_async_worker() |
256 | 283 |
|
257 | 284 | def close(self) -> None: |
258 | | - """Best-effort flush of pending messages for this logger by waiting briefly.""" |
259 | | - # Draining items belonging to this logger from the queue would require |
260 | | - # restructuring the shared queue, so we do a best-effort flush by |
261 | | - # waiting briefly for the background worker to catch up. This does not |
262 | | - # guarantee that all enqueued messages have been processed. |
263 | | - time.sleep(_ASYNC_CLOSE_WAIT_SECS) |
| 285 | + """Flush all pending async messages before returning.""" |
| 286 | + self.flush() |
264 | 287 |
|
265 | 288 | def _log_async(self, level: int, message: str, fields: Optional[Dict[str, Any]] = None) -> None: |
266 | 289 | """True asynchronous logging - non-blocking enqueue.""" |
@@ -310,11 +333,11 @@ def exception(self, message: str, **fields) -> None: |
310 | 333 | self._log_async(40, message, fields) |
311 | 334 |
|
312 | 335 | def flush(self) -> None: |
313 | | - """Best-effort, timing-based flush by yielding to the background worker.""" |
314 | | - # For async logger, we can't force immediate flush, but we can yield |
315 | | - # briefly to allow background processing. This does not guarantee that |
316 | | - # all pending messages have been processed. |
317 | | - time.sleep(_ASYNC_CLOSE_WAIT_SECS) |
| 336 | + """Block until all queued async messages are processed.""" |
| 337 | + _ensure_async_worker() |
| 338 | + marker = _FlushSignal(threading.Event()) |
| 339 | + _async_queue.put(marker) |
| 340 | + marker.event.wait() |
318 | 341 |
|
319 | 342 |
|
320 | 343 | # Lock-free logger cache using thread-local storage |
@@ -376,6 +399,9 @@ def shutdown_async_logging() -> None: |
376 | 399 | """Shutdown async logging gracefully.""" |
377 | 400 | global _async_worker |
378 | 401 | if _async_worker and _async_worker.is_alive(): |
| 402 | + # Ensure all enqueued items are processed before stopping the worker. |
| 403 | + _async_queue.join() |
| 404 | + |
379 | 405 | # Send shutdown sentinel so the worker can finish processing |
380 | 406 | try: |
381 | 407 | _async_queue.put_nowait(None) # Shutdown signal |
|
0 commit comments