Skip to content
This repository was archived by the owner on Feb 1, 2024. It is now read-only.

Commit a4cac40

Browse files
committed
Update the code, add 2024 benchmark results
1 parent 5aff8a1 commit a4cac40

File tree

67 files changed

+542
-143
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

67 files changed

+542
-143
lines changed

.gitignore

+2-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
.DS_Store
22
*.tar.gz
33
*.csv
4-
results/
54
browser_profiles/
65
browsertime/
6+
browsertime-results/
7+
__pycache__

add_results.py

+27
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
#!/usr/bin/env python3
# Copyright (c) 2024 The Brave Authors. All rights reserved.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at https://mozilla.org/MPL/2.0/.

import argparse
from components.result_map import ResultMap


def main():
  """Append a comma-separated list of metric values to a results CSV.

  CLI: add_results.py <csv_file> <browser> <browser_version> <metric>
       <value_list> [--key KEY]
  Every entry in <value_list> must parse as a float; a malformed entry
  raises ValueError before anything is written to the CSV.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('csv_file', type=str)
  parser.add_argument('browser', type=str)
  parser.add_argument('browser_version', type=str)
  parser.add_argument('metric', type=str)
  parser.add_argument('value_list', type=str)
  parser.add_argument('--key', type=str)
  args = parser.parse_args()

  results = ResultMap()
  for v in args.value_list.split(','):
    # float(v) fails fast on malformed input, before write_csv runs.
    results.addValue(args.browser, args.browser_version, args.metric,
                     args.key, float(v))

  results.write_csv(args.csv_file, True)


# Guard the entry point so importing this module (e.g. from tests)
# does not trigger argument parsing; the original called main()
# unconditionally.
if __name__ == '__main__':
  main()

benchmark_scripts/basemark.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ async function perfTest(context, commands) {
88
"return document.getElementsByClassName('overall-score')[1]?.textContent")
99
if (value && value != '') {
1010
console.log('got result', value)
11-
commands.measure.addObject({ 'basemark_score': parseFloat(value) });
11+
commands.measure.addObject({ 'basemark': parseFloat(value) });
1212
break;
1313
}
1414
}

benchmark_scripts/jetstream.js

+10-11
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,18 @@
1+
const utils = require('./utils.js')

// Runs JetStream 2.2 and records the overall score under `jetstream`.
async function perfTest(context, commands) {
  await commands.measure.start(
    'https://browserbench.org/JetStream2.2/');

  // The "Start Test" link only appears once the suite finishes loading.
  await commands.wait.byXpath('//a[text()="Start Test"]', 2 * 60 * 1000)
  await commands.js.run('JetStream.start()');

  // Poll for the summary score; a full run can take several minutes.
  const score = await utils.waitForThrottled(commands,
    'document.getElementById("result-summary")?.childNodes[0]?.textContent',
    10 * 60)
  if (score === false) {
    // waitForThrottled returns false on timeout; fail loudly instead of
    // silently recording parseFloat(false) === NaN as the result.
    throw new Error('JetStream produced no score before the timeout');
  }

  console.log('got result =', score)
  commands.measure.addObject({ 'jetstream': parseFloat(score) });
  await commands.screenshot.take('result')
};
1918

benchmark_scripts/kraken.js

+4-1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,10 @@ async function perfTest(context, commands) {
1515
console.error(raw)
1616
}
1717
console.log('got total', m[1], m[2])
18-
commands.measure.addObject({ 'kraken_ms': parseFloat(m[1]) });
18+
commands.measure.addObject({
19+
'kraken_ms': parseFloat(m[1]),
20+
'kraken_error%': parseFloat(m[2])
21+
});
1922
break;
2023
}
2124
}

benchmark_scripts/motionmark.js

+29
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
const utils = require('./utils.js')
2+
3+
async function perfTest(context, commands) {
4+
await commands.measure.start(
5+
'https://browserbench.org/MotionMark1.3');
6+
7+
await commands.wait.byXpath('//button[text()="Run Benchmark"]', 2 * 60 * 1000)
8+
await commands.click.byIdAndWait('start-button');
9+
10+
const score = await utils.waitForThrottled(commands,
11+
'document.querySelector(".score")?.textContent', 10 * 60)
12+
13+
const error = await commands.js.run(
14+
"return document.querySelector('.confidence').textContent.slice(1, -1)")
15+
16+
console.log('got result =', score, 'std =', error)
17+
18+
commands.measure.addObject(
19+
{
20+
'motionmark': parseFloat(score.split(' @')[0]),
21+
'motionmark_error': parseFloat(error)
22+
});
23+
24+
await commands.screenshot.take('result')
25+
};
26+
27+
module.exports = {
28+
test: perfTest
29+
};

benchmark_scripts/speedometer2.js

+20-12
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,26 @@
1+
const utils = require('./utils.js')

// Runs Speedometer 2.1: one warm-up iteration, then a measured run of
// 100 iterations; records the score and its confidence interval.
async function perfTest(context, commands) {
  const URL = 'https://www.browserbench.org/Speedometer2.1'
  const getResults = 'document.getElementById("result-number")?.textContent';

  // One warm up iteration:
  await commands.navigate(`${URL}?iterationCount=1`);
  await commands.js.run('startTest()');
  await utils.waitForThrottled(commands, getResults);

  // Measured run. BUG FIX: the original template literal ended with a
  // stray apostrophe (`iterationCount=100'`), corrupting the query
  // parameter sent to the benchmark page.
  await commands.measure.start(`${URL}?iterationCount=100`);
  await commands.js.run('startTest()');

  const value = await utils.waitForThrottled(commands, getResults);
  // The confidence label is prefixed with two characters ("± ");
  // drop them to keep only the numeric error.
  const error = await commands.js.run(
    'return document.getElementById("confidence-number").textContent.substr(2)')
  console.log('got result = ', value, 'std =', error)
  commands.measure.addObject(
    {
      'speedometer2.1': parseFloat(value),
      'speedometer2.1_error': parseFloat(error)
    });
  await commands.screenshot.take('result')
};
1826

benchmark_scripts/speedometer3.js

+34
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
const utils = require('./utils.js')
2+
3+
async function perfTest(context, commands) {
4+
const URL = 'https://www.browserbench.org/Speedometer3.0?startAutomatically=true'
5+
const getResults = 'document.getElementById("result-number")?.textContent';
6+
7+
// One warm up iteration:
8+
await commands.navigate(`${URL}&iterationCount=1`);
9+
await utils.waitForThrottled(commands, getResults, 3 * 60);
10+
11+
await commands.measure.start(`${URL}&iterationCount=10`);
12+
13+
const value = await utils.waitForThrottled(commands, getResults, 10 * 60);
14+
const error = await commands.js.run(
15+
'return document.getElementById("confidence-number").textContent.substr(2)')
16+
17+
console.log('got result = ', value, 'error =', error)
18+
commands.measure.addObject(
19+
{
20+
'speedometer3_avg': parseFloat(value),
21+
'speedometer3_error': parseFloat(error)
22+
});
23+
const raw = await commands.js.run('return JSON.parse(window.benchmarkClient._formattedJSONResult({ modern: true })).Score.values')
24+
commands.measure.addObject(
25+
{
26+
'speedometer3': raw,
27+
});
28+
29+
await commands.screenshot.take('result')
30+
};
31+
32+
module.exports = {
33+
test: perfTest
34+
};

benchmark_scripts/utils.js

+13
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
// Polls a JS expression in the page (via browsertime's commands.js.run)
// once per second until it yields a non-null, non-empty value.
// Returns that value, or `false` once `timeoutSeconds` polls have passed.
async function waitForThrottled(commands, condition, timeoutSeconds = 15 * 60) {
  for (let i = 0; i < timeoutSeconds; ++i) {
    // BUG FIX: `result` was assigned without const/let, leaking an
    // implicit global (and a ReferenceError under strict mode/ESM).
    const result = await commands.js.run(`return (${condition})`)
    if (result != null && result != '')
      return result
    await commands.wait.byTime(1000)
  }
  return false
}
10+
11+
module.exports = {
12+
waitForThrottled: waitForThrottled
13+
};

components/benchmark_measurement.py

+7-2
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ def Run(
2323
browser = browser_class()
2424
script = os.path.join('benchmark_scripts', name)
2525
assert os.path.exists(script)
26-
browser.prepare_profile(self.state.unsafe_use_profiles)
26+
browser.prepare_profile()
2727
result_dir = f'browsertime/{browser.name()}/{index}_{name}/{iteration}/'
2828
preURLDelay = 1000 if self.state.low_delays_for_testing else 10000
2929

@@ -33,9 +33,14 @@ def Run(
3333
'--timeouts.script',
3434
str(30 * 60 * 1000),
3535
])
36+
# browsertime/Chrome/0_speedometer2.js/0/screenshots/1
3637

3738
js_metrics = output['extras'][0]
3839
for metric, value in js_metrics.items():
39-
results.append((metric, None, value))
40+
if isinstance(value, list):
41+
for v in value:
42+
results.append((metric, None, v))
43+
else:
44+
results.append((metric, None, float(value)))
4045

4146
return results

0 commit comments

Comments
 (0)