30 commits
90ecd0c
Initial WIP
NoahStapp Aug 13, 2025
d1d6e84
First draft of flat model benchmarks
NoahStapp Aug 13, 2025
4db9461
Finish first draft of benchmarks
NoahStapp Aug 14, 2025
02798aa
Linting
NoahStapp Aug 14, 2025
c4ba5ce
Linting again
NoahStapp Aug 15, 2025
f7e7345
Omit perf tests from runtests
NoahStapp Aug 15, 2025
316258f
Add Evergreen automation for perf tests
NoahStapp Aug 15, 2025
58f27da
Fix perf test path
NoahStapp Aug 15, 2025
84b5fff
Fix report and result paths
NoahStapp Aug 15, 2025
e9acd6b
Use original benchmark params
NoahStapp Aug 15, 2025
7c07dfc
Added copyright + documentation
NoahStapp Aug 15, 2025
a6c5df9
Address Tim review
NoahStapp Aug 19, 2025
dc32d06
Use Model.objects.create() where possible
NoahStapp Aug 19, 2025
f97ef89
More review changes
NoahStapp Aug 19, 2025
c03d78b
include_expansions_in_env array
NoahStapp Aug 19, 2025
b7167eb
Add test
NoahStapp Aug 29, 2025
f12d42b
Update test
NoahStapp Aug 29, 2025
58ab077
testing
NoahStapp Sep 4, 2025
87e29ac
WIP
NoahStapp Sep 15, 2025
213d88e
Merge branch 'main' into DRIVERS-2917
NoahStapp Sep 15, 2025
29ac48f
Fix teardown
NoahStapp Oct 7, 2025
6a6cde2
Perf tests run daily
NoahStapp Oct 7, 2025
c5d3d5b
Linting
NoahStapp Oct 7, 2025
d9ae14e
Fix MAX_ITERATION_TIME
NoahStapp Oct 7, 2025
263abd0
Update for new spec changes
NoahStapp Nov 20, 2025
90643e5
Skip codespell on performance json files
NoahStapp Dec 10, 2025
6bb008c
Merge branch 'main' into DRIVERS-2917
NoahStapp Dec 10, 2025
ebc9c8d
Update .evergreen/config.yml
NoahStapp Dec 16, 2025
10d8355
Update .evergreen/config.yml
NoahStapp Dec 16, 2025
c31e77c
address review
NoahStapp Dec 16, 2025
72 changes: 72 additions & 0 deletions .evergreen/config.yml
@@ -48,6 +48,64 @@ functions:
args:
- ./.evergreen/run-tests.sh

"run performance tests":
- command: subprocess.exec
type: test
params:
binary: bash
working_dir: "src"
include_expansions_in_env: [ "DRIVERS_TOOLS", "MONGODB_URI" ]
args:
- ./.evergreen/run-perf-tests.sh

"attach benchmark test results":
- command: attach.results
params:
file_location: src/report.json

"send dashboard data":
- command: subprocess.exec
params:
binary: bash
args:
- .evergreen/perf-submission-setup.sh
working_dir: src
include_expansions_in_env: [
"requester",
"revision_order_id",
"project_id",
"version_id",
"build_variant",
"parsed_order_id",
"task_name",
"task_id",
"execution",
"is_mainline"
]
type: test
- command: expansions.update
params:
file: src/expansion.yml
- command: subprocess.exec
params:
binary: bash
args:
- .evergreen/perf-submission.sh
working_dir: src
include_expansions_in_env: [
"requester",
"revision_order_id",
"project_id",
"version_id",
"build_variant",
"parsed_order_id",
"task_name",
"task_id",
"execution",
"is_mainline"
]
type: test

"teardown":
- command: subprocess.exec
params:
@@ -67,6 +125,12 @@ tasks:
commands:
- func: "run unit tests"

- name: perf-tests
commands:
- func: "run performance tests"
- func: "attach benchmark test results"
- func: "send dashboard data"

buildvariants:
- name: tests-6-noauth-nossl
display_name: Run Tests 6.0 NoAuth NoSSL
@@ -111,3 +175,11 @@ buildvariants:
SSL: "ssl"
tasks:
- name: run-tests

- name: performance-benchmarks
display_name: Performance Benchmarks
run_on:
- rhel90-dbx-perf-large
batchtime: 1440
tasks:
- name: perf-tests
15 changes: 15 additions & 0 deletions .evergreen/perf-submission-setup.sh
@@ -0,0 +1,15 @@
#!/bin/bash
# We use the requester expansion to determine whether the data is from a mainline evergreen run or not

set -eu

# shellcheck disable=SC2154
if [ "${requester}" == "commit" ]; then
echo "is_mainline: true" >> expansion.yml
else
echo "is_mainline: false" >> expansion.yml
fi

# We parse the username out of the order_id as patches append that in and SPS does not need that information
# shellcheck disable=SC2154
echo "parsed_order_id: $(echo "${revision_order_id}" | awk -F'_' '{print $NF}')" >> expansion.yml
25 changes: 25 additions & 0 deletions .evergreen/perf-submission.sh
@@ -0,0 +1,25 @@
#!/bin/bash
# We use the requester expansion to determine whether the data is from a mainline evergreen run or not

set -eu

# Submit the performance data to the SPS endpoint
# shellcheck disable=SC2154
response=$(curl -s -w "\nHTTP_STATUS:%{http_code}" -X 'POST' \
"https://performance-monitoring-api.corp.mongodb.com/raw_perf_results/cedar_report?project=${project_id}&version=${version_id}&variant=${build_variant}&order=${parsed_order_id}&task_name=${task_name}&task_id=${task_id}&execution=${execution}&mainline=${is_mainline}" \
-H 'accept: application/json' \
-H 'Content-Type: application/json' \
-d @results.json)

http_status=$(echo "$response" | grep "HTTP_STATUS" | awk -F':' '{print $2}')
response_body=$(echo "$response" | sed '/HTTP_STATUS/d')

# We want to throw an error if the data was not successfully submitted
if [ "$http_status" -ne 200 ]; then
echo "Error: Received HTTP status $http_status"
echo "Response Body: $response_body"
exit 1
fi

echo "Response Body: $response_body"
echo "HTTP Status: $http_status"
15 changes: 15 additions & 0 deletions .evergreen/run-perf-tests.sh
@@ -0,0 +1,15 @@
#!/usr/bin/bash

set -eux

export OUTPUT_FILE="results.json"

# Install django-mongodb-backend
/opt/python/3.10/bin/python3 -m venv venv
. venv/bin/activate
Comment on lines +7 to +9

Contributor: Nit: whilst I'd rather not have a new dependency on drivers-evergreen-tools, to future-proof the binary usage you could add in the find_python function. (A sketch of that approach follows this file's diff.)

Contributor Author: This I copied over from the existing run-tests.sh script; let's update them both in a separate ticket.

Collaborator: We're going to bring in drivers-evergreen-tools anyway for QE.

python -m pip install -U pip
pip install -e .

python .evergreen/run_perf_test.py
mv tests/performance/$OUTPUT_FILE $OUTPUT_FILE
mv tests/performance/report.json report.json
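Following up on the find_python review thread above, a minimal sketch of what the drivers-evergreen-tools approach might look like; the find-python3.sh path and find_python3 function name are assumptions based on the review comment, not part of this PR:

#!/usr/bin/env bash
# Hypothetical sketch only: resolve the Python binary via drivers-evergreen-tools
# instead of hard-coding /opt/python/3.10/bin/python3. Assumes DRIVERS_TOOLS points
# at a checkout that provides a find-python3.sh helper defining find_python3.
set -eux

. "${DRIVERS_TOOLS}/.evergreen/find-python3.sh"
PYTHON_BINARY="$(find_python3)"

"${PYTHON_BINARY}" -m venv venv
. venv/bin/activate
python -m pip install -U pip
pip install -e .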
70 changes: 70 additions & 0 deletions .evergreen/run_perf_test.py
@@ -0,0 +1,70 @@
import json
import logging
import os
import shlex
import subprocess
import sys
from datetime import datetime
from pathlib import Path

LOGGER = logging.getLogger("test")
logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s")
OUTPUT_FILE = os.environ.get("OUTPUT_FILE")


def handle_perf(start_time: datetime):
end_time = datetime.now()
Contributor: For duration calculations, it's better practice to use time.monotonic(); any particular reason to use datetime? (See the sketch after this file's diff.)

Contributor Author: This is copied from how PyMongo handles it for consistency. I believe we use it there for ease of the different ways we use the timestamps.

elapsed_secs = (end_time - start_time).total_seconds()
with open(OUTPUT_FILE) as fid: # noqa: PTH123
results = json.load(fid)
LOGGER.info("results.json:\n%s", json.dumps(results, indent=2))

results = {
"status": "PASS",
"exit_code": 0,
"test_file": "BenchMarkTests",
"start": int(start_time.timestamp()),
"end": int(end_time.timestamp()),
"elapsed": elapsed_secs,
}
report = {"results": [results]}

LOGGER.info("report.json\n%s", json.dumps(report, indent=2))

with open("report.json", "w", newline="\n") as fid: # noqa: PTH123
json.dump(report, fid)


def run_command(cmd: str | list[str], **kwargs) -> None:
if isinstance(cmd, list):
cmd = " ".join(cmd)
LOGGER.info("Running command '%s'...", cmd)
kwargs.setdefault("check", True)
try:
subprocess.run(shlex.split(cmd), **kwargs) # noqa: PLW1510, S603
except subprocess.CalledProcessError as e:
LOGGER.error(e.output)
LOGGER.error(str(e))
sys.exit(e.returncode)
LOGGER.info("Running command '%s'... done.", cmd)


ROOT = Path(__file__).absolute().parent.parent
data_dir = ROOT / "specifications/source/benchmarking/odm-data"
if not data_dir.exists():
run_command("git clone --depth 1 https://github.com/mongodb/specifications.git")
run_command("tar xf flat_models.tgz", cwd=data_dir)
run_command("tar xf nested_models.tgz", cwd=data_dir)

os.chdir("tests/performance")
start_time = datetime.now()
run_command(
["python manage.py test"],
env=os.environ | {"TEST_PATH": str(data_dir), "OUTPUT_FILE": str(ROOT / "results.json")},
)
handle_perf(start_time)
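Following up on the time.monotonic() review thread above, a minimal sketch of combining wall-clock timestamps (needed for the report's start/end fields) with a monotonic clock for the elapsed duration; run_benchmarks here is a made-up stand-in for the real manage.py test invocation:

import time
from datetime import datetime


def run_benchmarks() -> None:
    """Stand-in for the real `python manage.py test` run."""
    time.sleep(0.1)


start_wall = datetime.now()
start_mono = time.monotonic()
run_benchmarks()
elapsed_secs = time.monotonic() - start_mono  # unaffected by wall-clock adjustments
report_entry = {
    "start": int(start_wall.timestamp()),
    "end": int(datetime.now().timestamp()),
    "elapsed": elapsed_secs,
}
print(report_entry)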
3 changes: 2 additions & 1 deletion .github/workflows/runtests.py
@@ -157,7 +157,8 @@
x.name
for x in (pathlib.Path(__file__).parent.parent.parent.resolve() / "tests").iterdir()
# Omit GIS tests unless GIS libraries are installed.
if x.name != "gis_tests_"
# Always omit the performance benchmarking suite.
if x.name != "gis_tests_" and x.name != "performance"
]
),
]
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -123,3 +123,6 @@ directory = "htmlcov"

[tool.rstcheck]
report_level = "WARNING"

[tool.codespell]
skip = "tests/performance/odm-data/flat_models/*.json,tests/performance/odm-data/nested_models/*.json"
23 changes: 23 additions & 0 deletions tests/performance/manage.py
@@ -0,0 +1,23 @@
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""

import os
import sys


def main():
"""Run administrative tasks."""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "perftest.settings")
try:
from django.core.management import execute_from_command_line # noqa: PLC0415
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)


if __name__ == "__main__":
main()
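Based on the orchestration in .evergreen/run_perf_test.py, a local run of the benchmark suite would look roughly like the sketch below; the data path is an assumption and must point at the extracted odm-data sets from the specifications repo:

# Hypothetical local invocation, mirroring .evergreen/run_perf_test.py.
cd tests/performance
TEST_PATH=/path/to/specifications/source/benchmarking/odm-data \
OUTPUT_FILE="$PWD/results.json" \
python manage.py test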