# OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function

+import argparse
import os
import re
from abc import ABCMeta, abstractproperty, abstractmethod
from os.path import join

import mx
-from mx_benchmark import StdOutRule, VmRegistry, java_vm_registry, Vm, GuestVm, VmBenchmarkSuite
+from mx_benchmark import StdOutRule, VmRegistry, java_vm_registry, Vm, GuestVm, VmBenchmarkSuite, AveragingBenchmarkMixin
from mx_graalpython_bench_param import benchmarks_list, harnessPath

# ----------------------------------------------------------------------------------------------------------------------
PYTHON_VM_REGISTRY_NAME = "Python"
CONFIGURATION_DEFAULT = "default"

+DEFAULT_ITERATIONS = 10
+

# ----------------------------------------------------------------------------------------------------------------------
#
@@ -174,7 +177,7 @@ def config_name(self):
# the benchmark definition
#
# ----------------------------------------------------------------------------------------------------------------------
-class PythonBenchmarkSuite(VmBenchmarkSuite):
+class PythonBenchmarkSuite(VmBenchmarkSuite, AveragingBenchmarkMixin):
    def __init__(self, name, harness_path):
        self._name = name
        self._harness_path = harness_path
@@ -193,7 +196,7 @@ def rules(self, output, benchmarks, bm_suite_args):
                r"^### iteration=(?P<iteration>[0-9]+), name=(?P<benchmark>[a-zA-Z0-9.\-]+), duration=(?P<time>[0-9]+(\.[0-9]+)?$)",  # pylint: disable=line-too-long
                {
                    "benchmark": '{}.{}'.format(self._name, bench_name),
-                    "metric.name": "time",
+                    "metric.name": "warmup",
                    "metric.iteration": ("<iteration>", int),
                    "metric.type": "numeric",
                    "metric.value": ("<time>", float),
@@ -205,6 +208,24 @@ def rules(self, output, benchmarks, bm_suite_args):
            ),
        ]

+    def run(self, benchmarks, bmSuiteArgs):
+        results = super(PythonBenchmarkSuite, self).run(benchmarks, bmSuiteArgs)
+        self.addAverageAcrossLatestResults(results)
+        return results
+
+    def postprocessRunArgs(self, run_args):
+        parser = argparse.ArgumentParser(add_help=False)
+        parser.add_argument("-i", default=None)
+        args, remaining = parser.parse_known_args(run_args)
+        if args.i:
+            if args.i.isdigit():
+                return ["-i", args.i] + remaining
+            if args.i == "-1":
+                return remaining
+        else:
+            iterations = DEFAULT_ITERATIONS + self.getExtraIterationCount(DEFAULT_ITERATIONS)
+            return ["-i", str(iterations)] + remaining
+
    def createVmCommandLineArgs(self, benchmarks, run_args):
        if not benchmarks or len(benchmarks) != 1:
            mx.abort("Please run a specific benchmark (mx benchmark {}:<benchmark-name>) or all the benchmarks "
@@ -214,10 +235,9 @@ def createVmCommandLineArgs(self, benchmarks, run_args):

        cmd_args = [self._harness_path, join(self._bench_path, "{}.py".format(benchmark))]
        if len(run_args) == 0:
-            cmd_args.extend(self._benchmarks[benchmark])
-        else:
-            cmd_args.extend(run_args)
-
+            run_args = self._benchmarks[benchmark]
+        run_args = self.postprocessRunArgs(run_args)
+        cmd_args.extend(run_args)
        return cmd_args

    def benchmarkList(self, bm_suite_args):
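
As a reading aid (not part of the commit): the new postprocessRunArgs only rewrites the harness's "-i" iteration flag. The sketch below replays that logic standalone; the helper name, the extra_iterations default (standing in for whatever getExtraIterationCount(DEFAULT_ITERATIONS) returns from the mx AveragingBenchmarkMixin), and the example argument lists are illustrative assumptions, not part of the diff.

    import argparse

    DEFAULT_ITERATIONS = 10

    def resolve_iteration_flag(run_args, extra_iterations=5):
        # Mirrors PythonBenchmarkSuite.postprocessRunArgs from the diff above:
        # an explicit numeric "-i N" is forwarded unchanged, "-i -1" strips the
        # flag entirely, and a missing "-i" falls back to the default count plus
        # the extra iterations requested for averaging. Like the diff, a malformed
        # "-i" value falls through and yields None.
        parser = argparse.ArgumentParser(add_help=False)
        parser.add_argument("-i", default=None)
        args, remaining = parser.parse_known_args(run_args)
        if args.i:
            if args.i.isdigit():
                return ["-i", args.i] + remaining
            if args.i == "-1":
                return remaining
        else:
            iterations = DEFAULT_ITERATIONS + extra_iterations
            return ["-i", str(iterations)] + remaining

    print(resolve_iteration_flag([]))             # ['-i', '15']
    print(resolve_iteration_flag(["-i", "3"]))    # ['-i', '3']
    print(resolve_iteration_flag(["-i", "-1"]))   # []

Each iteration the harness then emits a line of the form "### iteration=<n>, name=<benchmark>, duration=<seconds>", which the StdOutRule above now records as a "warmup" data point, and the run() override calls addAverageAcrossLatestResults(results) so an averaged score over the latest iterations is added to the per-iteration samples.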