Commit ee12b27

Refactor into single run_benchmark
1 parent a1c1826 commit ee12b27

4 files changed: 55 additions, 26 deletions

pyperformance/data-files/benchmarks/MANIFEST

Lines changed: 2 additions & 2 deletions
@@ -2,8 +2,8 @@
 
 name                     metafile
 2to3                     <local>
-argparse_args            <local:argparse>
-argparse_subparser       <local:argparse>
+argparse                 <local>
+argparse_subparsers      <local:argparse>
 async_generators         <local>
 async_tree               <local>
 async_tree_cpu_io_mixed  <local:async_tree>

pyperformance/data-files/benchmarks/bm_argparse_args/pyproject.toml renamed to pyperformance/data-files/benchmarks/bm_argparse/bm_argparse_subparsers.toml

Lines changed: 2 additions & 2 deletions
@@ -6,5 +6,5 @@ urls = {repository = "https://github.com/python/pyperformance"}
 dynamic = ["version"]
 
 [tool.pyperformance]
-name = "argparse_args"
-tag = "argparse"
+name = "argparse_subparsers"
+extra_opts = ["subparsers"]
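For context: tag = "argparse" only grouped the old entries under a shared label, while extra_opts lists extra command-line arguments that the pyperformance harness appends when it launches the benchmark script (that is my reading of the harness, not something shown in this diff). Under that assumption, this variant ends up being invoked roughly as

    python run_benchmark.py subparsers <pyperf worker options>

which lines up with the positional "benchmark" argument added to run_benchmark.py below.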

pyperformance/data-files/benchmarks/bm_argparse_subparser/pyproject.toml renamed to pyperformance/data-files/benchmarks/bm_argparse/pyproject.toml

Lines changed: 2 additions & 2 deletions
@@ -6,5 +6,5 @@ urls = {repository = "https://github.com/python/pyperformance"}
 dynamic = ["version"]
 
 [tool.pyperformance]
-name = "argparse_subparser"
-tag = "argparse"
+name = "argparse_many_optionals"
+extra_opts = ["many_optionals"]

pyperformance/data-files/benchmarks/bm_argparse_subparser/run_benchmark.py renamed to pyperformance/data-files/benchmarks/bm_argparse/run_benchmark.py

Lines changed: 49 additions & 20 deletions
@@ -1,6 +1,7 @@
 """
-Benchmark an argparse program with multiple subparsers, each with their own
-subcommands, and then parse a series of command-line arguments.
+Benchmark argparse programs with:
+1) multiple subparsers, each with their own subcommands, and then parse a series of command-line arguments.
+2) a large number of optional arguments, and then parse a series of command-line arguments.
 
 Author: Savannah Ostrowski
 """
@@ -9,7 +10,14 @@
 import pyperf
 
 
-def create_parser() -> argparse.ArgumentParser:
+def generate_arguments(i: int) -> list:
+    arguments = ["input.txt", "output.txt"]
+    for i in range(i):
+        arguments.extend([f"--option{i}", f"value{i}"])
+    return arguments
+
+
+def bm_many_optionals() -> argparse.ArgumentParser:
     parser = argparse.ArgumentParser(description="A version control system CLI")
 
     parser.add_argument("--version", action="version", version="1.0")
@@ -37,9 +45,7 @@ def create_parser() -> argparse.ArgumentParser:
     )
 
     network_group = push_parser.add_argument_group("Network options")
-    network_group.add_argument(
-        "--dryrun", action="store_true", help="Simulate changes"
-    )
+    network_group.add_argument("--dryrun", action="store_true", help="Simulate changes")
     network_group.add_argument(
         "--timeout", type=int, default=30, help="Timeout in seconds"
     )
@@ -56,10 +62,6 @@ def create_parser() -> argparse.ArgumentParser:
     global_group.add_argument("--verbose", action="store_true", help="Verbose output")
     global_group.add_argument("--quiet", action="store_true", help="Quiet output")
 
-    return parser
-
-
-def bench_argparse(loops: int) -> None:
     argument_lists = [
         ["--verbose", "add", "file1.txt", "file2.txt"],
         ["add", "file1.txt", "file2.txt"],
@@ -77,19 +79,46 @@ def bench_argparse(loops: int) -> None:
         ],
     ]
 
-    parser = create_parser()
-    range_it = range(loops)
-    t0 = pyperf.perf_counter()
+    for arguments in argument_lists:
+        parser.parse_args(arguments)
+
+
+def bm_subparsers() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("input_file", type=str, help="The input file")
+    parser.add_argument("output_file", type=str, help="The output file")
+
+    for i in range(1000):
+        parser.add_argument(f"--option{i}", type=str, help=f"Optional argument {i}")
+
+    argument_lists = [
+        generate_arguments(500),
+        generate_arguments(1000),
+    ]
+
+    for args in argument_lists:
+        parser.parse_args(args)
+
+
+BENCHMARKS = {
+    "many_optionals": bm_many_optionals,
+    "subparsers": bm_subparsers,
+}
+
 
-    for _ in range_it:
-        for args in argument_lists:
-            parser.parse_args(args)
+def add_cmdline_args(cmd, args):
+    cmd.append(args.benchmark)
 
-    return pyperf.perf_counter() - t0
 
+def add_parser_args(parser):
+    parser.add_argument("benchmark", choices=BENCHMARKS, help="Which benchmark to run.")
 
 if __name__ == "__main__":
-    runner = pyperf.Runner()
-    runner.metadata["description"] = "Benchmark an argparse program with subparsers"
+    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
+    runner.metadata["description"] = "Argparse benchmark"
+    add_parser_args(runner.argparser)
+    args = runner.parse_args()
+    benchmark = args.benchmark
 
-    runner.bench_time_func("argparse", bench_argparse)
+    runner.bench_func(args.benchmark, BENCHMARKS[args.benchmark])
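The dispatch pattern the new script uses is worth isolating: benchmark callables live in a dict keyed by name, a positional argument picks one, and add_cmdline_args forwards that choice to the worker processes that pyperf spawns (each worker re-executes the script, so it must receive the same positional argument). Below is a minimal standalone sketch of that pattern; the placeholder workloads and the names bench_a / bench_b are mine, not from the commit.

#!/usr/bin/env python3
"""Minimal sketch of pyperf benchmark dispatch via a positional argument."""
import pyperf


def bench_a() -> None:
    sum(range(10_000))  # placeholder workload, not from the commit


def bench_b() -> None:
    sorted(range(10_000), reverse=True)  # placeholder workload, not from the commit


BENCHMARKS = {
    "a": bench_a,
    "b": bench_b,
}


def add_cmdline_args(cmd, args):
    # pyperf re-runs this script in worker processes; append the selected
    # name so every worker resolves the same callable from BENCHMARKS.
    cmd.append(args.benchmark)


if __name__ == "__main__":
    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
    runner.argparser.add_argument(
        "benchmark", choices=sorted(BENCHMARKS), help="Which benchmark to run."
    )
    args = runner.parse_args()
    # bench_func calibrates the loop count and times the chosen callable.
    runner.bench_func(args.benchmark, BENCHMARKS[args.benchmark])

Invoked as, say, python sketch.py a, the parent process and every pyperf worker agree on which entry to time; the commit's run_benchmark.py does the same with many_optionals and subparsers.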
