Skip to content

Commit 53755c4

Browse files
Fixes Issue #66: ConcurrentTimedRotatingFileHandler rotating gzip files fails
The gzip extension is already present in the return value of `getFilesToDelete` in Python's `TimedRotatingFileHandler` and doesn't need to be added again. This was causing rollover to fail in Timed mode.
1 parent 6fb63e6 commit 53755c4

File tree

5 files changed

+128
-21
lines changed

5 files changed

+128
-21
lines changed

.vscode/launch.json

+45
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
{
2+
// Use IntelliSense to learn about possible attributes.
3+
// Hover to view descriptions of existing attributes.
4+
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5+
"version": "0.2.0",
6+
"configurations": [
7+
{
8+
"name": "Python: Current File",
9+
"type": "python",
10+
"request": "launch",
11+
"program": "${file}",
12+
"console": "integratedTerminal",
13+
"justMyCode": true
14+
},
15+
{
16+
"name": "CLH PyTests",
17+
"type": "python",
18+
"request": "launch",
19+
"module": "pytest",
20+
"console": "integratedTerminal",
21+
"cwd": "${workspaceFolder}/tests",
22+
"args": [],
23+
"justMyCode": false
24+
},
25+
{
26+
"name": "Concurrent Log Handler basic example",
27+
"type": "python",
28+
"request": "launch",
29+
"program": "${workspaceFolder}/tests/other/test.py",
30+
"console": "integratedTerminal",
31+
"cwd": "${workspaceFolder}/tests/other",
32+
"justMyCode": true
33+
},
34+
{
35+
"name": "CLH old stress test",
36+
"type": "python",
37+
"request": "launch",
38+
"program": "${workspaceFolder}/tests/other/stresstest.py",
39+
"console": "integratedTerminal",
40+
"cwd": "${workspaceFolder}/tests/other",
41+
"args": ["--log-calls", "2000"],
42+
"justMyCode": true
43+
},
44+
]
45+
}

src/concurrent_log_handler/__init__.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -836,8 +836,10 @@ def doRollover(self): # noqa: C901, PLR0912
836836
self.clh.do_gzip(dfn)
837837

838838
if self.backupCount > 0:
839-
for s in self.getFilesToDelete():
840-
os.remove(s + gzip_ext)
839+
# File will already have gzip extension here if applicable
840+
# Thanks to @moynihan
841+
for file in self.getFilesToDelete():
842+
os.remove(file)
841843
# if not self.delay:
842844
# self.stream = self._open()
843845
newRolloverAt = self.computeRollover(currentTime)

tests/other/stresstest.py

+4-3
Original file line numberDiff line numberDiff line change
@@ -28,11 +28,12 @@
2828
from time import sleep
2929

3030
# local lib; for testing
31-
from concurrent_log_handler import PY2, ConcurrentRotatingFileHandler, randbits
31+
from concurrent_log_handler import ConcurrentRotatingFileHandler, randbits
3232

33-
__version__ = "$Id$"
3433
__author__ = "Lowell Alleman"
3534

35+
PY2 = False # No longer supporting Python 2.7
36+
3637
# ruff: noqa: F821, E501
3738

3839
# The total amount of rotated files to keep through the test run. Any data accumulated
@@ -252,7 +253,7 @@ def rand_string(str_len):
252253

253254
parser = OptionParser(
254255
usage="usage: %prog",
255-
version=__version__,
256+
# version=__version__,
256257
description="Stress test the concurrent_log_handler module.",
257258
)
258259
parser.add_option(

tests/stresstest.py

+38-13
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ def worker_process(test_opts: TestOptions, process_id: int, rollover_counter):
169169
rollover_counter.increment(file_handler.num_rollovers)
170170

171171

172-
def validate_log_file(test_opts: TestOptions, run_time: float) -> bool:
172+
def validate_log_file(test_opts: TestOptions, run_time: float, expect_all=True) -> bool:
173173
process_tracker = {i: {} for i in range(test_opts.num_processes)}
174174

175175
# Sort log files, starting with the most recent backup
@@ -210,13 +210,14 @@ def validate_log_file(test_opts: TestOptions, run_time: float) -> bool:
210210
process_tracker[process_id][message_id] = msg_state
211211

212212
log_calls = test_opts.log_calls
213-
for process_id, message_ids in process_tracker.items():
214-
if len(message_ids) != log_calls:
215-
print(
216-
f"Error: Missing messages from Process-{process_id}: "
217-
f"len(message_ids) {len(message_ids)} != log_calls {log_calls}"
218-
)
219-
return False
213+
if expect_all:
214+
for process_id, message_ids in process_tracker.items():
215+
if len(message_ids) != log_calls:
216+
print(
217+
f"Error: Missing messages from Process-{process_id}: "
218+
f"len(message_ids) {len(message_ids)} != log_calls {log_calls}"
219+
)
220+
return False
220221
print(
221222
f"{run_time:.2f} seconds to read {chars_read} chars "
222223
f"from {len(all_log_files)} files ({chars_read / run_time:.2f} chars/sec)"
@@ -248,6 +249,33 @@ def run_stress_test(test_opts: TestOptions) -> int:
248249

249250
end_time = time.time()
250251

252+
print(
253+
f"All processes finished. (Rollovers: "
254+
f"{rollover_counter.get_value()} - min was {test_opts.min_rollovers})"
255+
)
256+
257+
# If backupCount is less than 10, assume we want specifically to
258+
# test backupCount. This means, in most cases, we will have deleted
259+
# some logs files and we should not expect to find all log files.
260+
# Therefore we can't do the "expect_all" check where it looks for any
261+
# missing lines. It would be nice to be able to combine these somehow.
262+
MAGIC_BACKUP_COUNT = 10
263+
expect_all_lines = True
264+
backup_count = test_opts.log_opts.get("backupCount", 0)
265+
if backup_count > 0 and backup_count < MAGIC_BACKUP_COUNT:
266+
expect_all_lines = False
267+
log_path = os.path.join(test_opts.log_dir, test_opts.log_file)
268+
log_files = glob.glob(f"{log_path}*")
269+
# Check that backupCount was not exceeded.
270+
# The +1 is because we're counting 'backups' plus the main log file.
271+
if len(log_files) != backup_count + 1:
272+
print(
273+
f"Error: {len(log_files)} log files were created but "
274+
f"we expected {backup_count + 1}. Could indicate a failure "
275+
f"to rotate properly or to delete excessive backups (`backupCount`)."
276+
)
277+
return 1
278+
251279
# Each test should trigger some minimum number of rollovers.
252280
if (
253281
test_opts.min_rollovers
@@ -258,15 +286,12 @@ def run_stress_test(test_opts: TestOptions) -> int:
258286
f"we expected at least {test_opts.min_rollovers}."
259287
)
260288
return 1
261-
print(
262-
f"All processes finished. (Rollovers: "
263-
f"{rollover_counter.get_value()} - min was {test_opts.min_rollovers})"
264-
)
265289

266290
# Check for any omissions or duplications.
267-
if validate_log_file(test_opts, end_time - start_time):
291+
if validate_log_file(test_opts, end_time - start_time, expect_all=expect_all_lines):
268292
print("Stress test passed.")
269293
return 0
294+
270295
print("Stress test failed.")
271296
return 1
272297

tests/test_stresstest.py

+37-3
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,24 @@
33

44
"""
55
Pytest based unit test cases to drive stresstest.py.
6+
7+
See comments about backupCount in stresstest.py. In short,
8+
if backupCount is set here less than 10, we assume that
9+
some logs are deleted before the end of the test and therefore
10+
don't test specifically for missing lines/items.
611
"""
712

813
import pytest
914
from stresstest import TestOptions, run_stress_test
1015

1116
TEST_CASES = {
1217
"default test options": TestOptions(),
18+
"backupCount=3": TestOptions(
19+
log_opts=TestOptions.default_log_opts({"backupCount": 3}),
20+
),
21+
"backupCount=3, use_gzip=True": TestOptions(
22+
log_opts=TestOptions.default_log_opts({"backupCount": 3, "use_gzip": True}),
23+
),
1324
"num_processes=2, log_calls=6_000": TestOptions(
1425
num_processes=2, log_calls=6_000, min_rollovers=80
1526
),
@@ -65,6 +76,32 @@
6576
}
6677
),
6778
),
79+
"backupCount=3, use_gzip=True, use_timed=True, interval=3, log_calls=3_000, num_processes=4": TestOptions(
80+
use_timed=True,
81+
num_processes=4,
82+
log_calls=3_000,
83+
min_rollovers=4,
84+
log_opts=TestOptions.default_timed_log_opts(
85+
{
86+
"backupCount": 3,
87+
"interval": 3,
88+
"use_gzip": True,
89+
}
90+
),
91+
),
92+
"backupCount=4, use_timed=True, interval=4, log_calls=3_000, num_processes=5": TestOptions(
93+
use_timed=True,
94+
num_processes=5,
95+
log_calls=3_000,
96+
min_rollovers=3,
97+
log_opts=TestOptions.default_timed_log_opts(
98+
{
99+
"backupCount": 4,
100+
"interval": 4,
101+
"use_gzip": False,
102+
}
103+
),
104+
),
68105
"use_timed=True, num_processes=15, interval=1, log_calls=5_000, use_gzip=True": TestOptions(
69106
use_timed=True,
70107
log_calls=5_000,
@@ -102,9 +139,6 @@
102139
}
103140
),
104141
),
105-
# TODO: it would be good to have some test cases that verify that backupCount is not exceeded.
106-
# Testing time intervals other than seconds is difficult because the tests would
107-
# take hours unless we find a way to mock things.
108142
}
109143

110144

0 commit comments

Comments
 (0)