4 changes: 4 additions & 0 deletions testing/benchmark.md
@@ -8,6 +8,10 @@ This document outlines the benchmarking process for comparing the performance of
 
 ```bash
 python3 testing/benchmark_hooks.py
+
+# or
+
+hyperfine -i --warmup 1 -r 5 'pre-commit run --config ../testing/benchmark_hook_1.yaml --all-files' 'pre-commit run --config ../testing/benchmark_hook_2.yaml --all-files'
 ```
 
 ## Results
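The doc now shows two ways to run the benchmark: the wrapper script or hyperfine directly. If the numbers are to be post-processed (for example into the Results section), hyperfine can also emit machine-readable output; below is a minimal sketch, assuming the benchmark was run with `--export-json benchmark.json` (the flag and the `results[].command/mean/stddev/min/max` fields come from hyperfine's JSON export, the file name is illustrative and not part of this PR):

```python
import json

# Sketch only: assumes hyperfine was invoked with `--export-json benchmark.json`.
# Field names follow hyperfine's JSON export schema.
with open("benchmark.json") as f:
    data = json.load(f)

for result in data["results"]:
    print(
        f"{result['command']}: "
        f"mean {result['mean']:.3f}s, std {result['stddev']:.3f}s, "
        f"min {result['min']:.3f}s, max {result['max']:.3f}s"
    )
```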
4 changes: 2 additions & 2 deletions testing/benchmark_hook_1.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/cpp-linter/cpp-linter-hooks
-    rev: v1.1.2
+    rev: v1.1.3
     hooks:
       - id: clang-format
-        args: [--style=file, --version=21]
+        args: [--style=file]
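As a quick sanity check that the two configs compare what the script's docstring claims (cpp-linter-hooks vs. mirrors-clang-format), the pinned repos, revs, and hook ids can be dumped from both YAML files. A small sketch, assuming PyYAML is installed and the script is run from the repository root; benchmark_hook_2.yaml is not shown in this diff, so its contents are an assumption:

```python
import yaml  # PyYAML, assumed to be installed

# Sketch only: print what each benchmark config actually pins.
for config in ("testing/benchmark_hook_1.yaml", "testing/benchmark_hook_2.yaml"):
    with open(config) as f:
        data = yaml.safe_load(f)
    for repo in data["repos"]:
        hook_ids = ", ".join(hook["id"] for hook in repo["hooks"])
        print(f"{config}: {repo['repo']} @ {repo['rev']} -> {hook_ids}")
```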
115 changes: 27 additions & 88 deletions testing/benchmark_hooks.py
@@ -1,9 +1,11 @@
 #!/usr/bin/env python3
 """
-Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format.
+Benchmark script to compare performance of cpp-linter-hooks vs mirrors-clang-format using hyperfine.
 
 Usage:
     python benchmark_hooks.py
+    # or directly with hyperfine:
+    hyperfine --warmup 1 -r 5 'pre-commit run --config ../testing/benchmark_hook_1.yaml --all-files' 'pre-commit run --config ../testing/benchmark_hook_2.yaml --all-files'
 
 Requirements:
     - pre-commit must be installed and available in PATH
@@ -15,8 +17,7 @@
 
 import os
 import subprocess
-import time
-import statistics
+import sys
 
 HOOKS = [
     {
@@ -50,95 +51,33 @@ def prepare_code():
     pass
 
 
-def run_hook(config):
-    cmd = ["pre-commit", "run", "--config", config, "--all-files"]
-    start = time.perf_counter()
-    try:
-        subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    except subprocess.CalledProcessError:
-        # Still record time even if hook fails
-        pass
-    end = time.perf_counter()
-    return end - start
-
-
-def benchmark():
-    results = {}
-    prepare_code()
+def run_hyperfine():
     os.chdir("examples")
-    for hook in HOOKS:
-        subprocess.run(["git", "restore", "."], check=True)
-        times = []
-        print(f"\nBenchmarking {hook['name']}...")
-        for i in range(REPEATS):
-            subprocess.run(["pre-commit", "clean"])
-            t = run_hook(hook["config"])
-            print(f" Run {i + 1}: {t:.3f} seconds")
-            times.append(t)
-        results[hook["name"]] = times
-    return results
-
-
-def report(results):
-    headers = ["Hook", "Avg (s)", "Std (s)", "Min (s)", "Max (s)", "Runs"]
-    col_widths = [max(len(h), 16) for h in headers]
-    # Calculate max width for each column
-    for name, times in results.items():
-        col_widths[0] = max(col_widths[0], len(name))
-    print("\nBenchmark Results:\n")
-    # Print header
-    header_row = " | ".join(h.ljust(w) for h, w in zip(headers, col_widths))
-    print(header_row)
-    print("-+-".join("-" * w for w in col_widths))
-    # Print rows
-    lines = []
-    for name, times in results.items():
-        avg = statistics.mean(times)
-        std = statistics.stdev(times) if len(times) > 1 else 0.0
-        min_t = min(times)
-        max_t = max(times)
-        row = [
-            name.ljust(col_widths[0]),
-            f"{avg:.3f}".ljust(col_widths[1]),
-            f"{std:.3f}".ljust(col_widths[2]),
-            f"{min_t:.3f}".ljust(col_widths[3]),
-            f"{max_t:.3f}".ljust(col_widths[4]),
-            str(len(times)).ljust(col_widths[5]),
-        ]
-        print(" | ".join(row))
-        lines.append(" | ".join(row))
-    # Save to file
+    commands = [
+        f"pre-commit run --config {hook['config']} --all-files" for hook in HOOKS
+    ]
+    hyperfine_cmd = [
+        "hyperfine",
+        "--warmup",
+        "1",
+        "-r",
+        str(REPEATS),
+    ] + commands
+    print("Running benchmark with hyperfine:")
+    print(" ".join(hyperfine_cmd))
+    try:
+        subprocess.run(hyperfine_cmd, check=True)
+    except FileNotFoundError:
+        print(
+            "hyperfine is not installed. Please install it with 'cargo install hyperfine' or 'brew install hyperfine'."
+        )
+        sys.exit(1)
+    os.chdir("..")
-    with open(RESULTS_FILE, "w") as f:
-        f.write(header_row + "\n")
-        f.write("-+-".join("-" * w for w in col_widths) + "\n")
-        for line in lines:
-            f.write(line + "\n")
-    print(f"\nResults saved to {RESULTS_FILE}")
-
-    # Write to GitHub Actions summary
-    summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
-    if summary_path:
-        with open(summary_path, "a") as f:
-            f.write("## Benchmark Results\n\n")
-            # Markdown table header
-            md_header = "| " + " | ".join(headers) + " |\n"
-            md_sep = "|" + "|".join(["-" * (w + 2) for w in col_widths]) + "|\n"
-            f.write(md_header)
-            f.write(md_sep)
-            for name, times in results.items():
-                avg = statistics.mean(times)
-                std = statistics.stdev(times) if len(times) > 1 else 0.0
-                min_t = min(times)
-                max_t = max(times)
-                md_row = f"| {name} | {avg:.3f} | {std:.3f} | {min_t:.3f} | {max_t:.3f} | {len(times)} |\n"
-                f.write(md_row)
-            f.write("\n")
 
 
 def main():
-    results = benchmark()
-    report(results)
+    prepare_code()
+    run_hyperfine()
 
 
 if __name__ == "__main__":
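Note that the removed report() also appended a Markdown table to GITHUB_STEP_SUMMARY, and that behavior is gone in this PR. If a CI summary is still wanted, one option is to let hyperfine write the table itself via its `--export-markdown` flag and append that file. A rough sketch under that assumption; the results.md path is illustrative:

```python
import os

# Sketch only: assumes hyperfine was invoked with `--export-markdown results.md`.
summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
if summary_path:
    with open("results.md") as src, open(summary_path, "a") as dst:
        dst.write("## Benchmark Results\n\n")  # same heading the old report() used
        dst.write(src.read())
        dst.write("\n")
```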