INTPYTHON-847 - Standardize performance testing infrastructure #366
@@ -48,6 +48,62 @@ functions:

```yaml
        args:
          - ./.evergreen/run-tests.sh

  "run performance tests":
    - command: subprocess.exec
      type: test
      params:
        binary: bash
        working_dir: "src"
        include_expansions_in_env: [ "DRIVERS_TOOLS", "MONGODB_URI" ]
        args:
          - ./.evergreen/run-perf-tests.sh

  attach benchmark test results:
    - command: attach.results
      params:
        file_location: src/report.json

  send dashboard data:
    - command: subprocess.exec
      params:
        binary: bash
        args:
          - .evergreen/perf-submission-setup.sh
        working_dir: src
        include_expansions_in_env:
          - requester
          - revision_order_id
          - project_id
          - version_id
          - build_variant
          - parsed_order_id
          - task_name
          - task_id
          - execution
          - is_mainline
      type: test
    - command: expansions.update
      params:
        file: src/expansion.yml
    - command: subprocess.exec
      params:
        binary: bash
        args:
          - .evergreen/perf-submission.sh
        working_dir: src
        include_expansions_in_env:
          - requester
          - revision_order_id
          - project_id
          - version_id
          - build_variant
          - parsed_order_id
          - task_name
          - task_id
          - execution
          - is_mainline
      type: test

  "teardown":
    - command: subprocess.exec
      params:
```
@@ -67,6 +123,12 @@ tasks:

```yaml
    commands:
      - func: "run unit tests"

  - name: perf-tests
    commands:
      - func: "run performance tests"
      - func: "attach benchmark test results"
      - func: "send dashboard data"

buildvariants:
  - name: tests-6-noauth-nossl
    display_name: Run Tests 6.0 NoAuth NoSSL
```
@@ -111,3 +173,11 @@ buildvariants:

```yaml
      SSL: "ssl"
    tasks:
      - name: run-tests

  - name: performance-benchmarks
    display_name: Performance Benchmarks
    run_on:
      - rhel90-dbx-perf-large
    batchtime: 10080
    tasks:
      - name: perf-tests
```
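Two notes on the wiring above: the `performance-benchmarks` variant uses `batchtime: 10080` (minutes), so it runs roughly once a week on the dedicated `rhel90-dbx-perf-large` host, and the `run performance tests` function simply shells out to `./.evergreen/run-perf-tests.sh` with `MONGODB_URI` (and `DRIVERS_TOOLS`) passed through from the task environment. A rough local equivalent, assuming a mongod is already listening on the default port, might look like this:

```bash
# Hypothetical local reproduction of the "run performance tests" function.
# Assumes a MongoDB deployment is already listening on localhost:27017.
export MONGODB_URI="mongodb://localhost:27017"
bash ./.evergreen/run-perf-tests.sh
```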
@@ -0,0 +1,15 @@ (new file)

```bash
#!/bin/bash
# We use the requester expansion to determine whether the data is from a mainline evergreen run or not

set -eu

# shellcheck disable=SC2154
if [ "${requester}" == "commit" ]; then
  echo "is_mainline: true" >> expansion.yml
else
  echo "is_mainline: false" >> expansion.yml
fi

# We parse the username out of the order_id as patches append that in and SPS does not need that information
# shellcheck disable=SC2154
echo "parsed_order_id: $(echo "${revision_order_id}" | awk -F'_' '{print $NF}')" >> expansion.yml
```
@@ -0,0 +1,25 @@ (new file)

```bash
#!/bin/bash
# We use the requester expansion to determine whether the data is from a mainline evergreen run or not

set -eu

# Submit the performance data to the SPS endpoint
# shellcheck disable=SC2154
response=$(curl -s -w "\nHTTP_STATUS:%{http_code}" -X 'POST' \
  "https://performance-monitoring-api.corp.mongodb.com/raw_perf_results/cedar_report?project=${project_id}&version=${version_id}&variant=${build_variant}&order=${parsed_order_id}&task_name=${task_name}&task_id=${task_id}&execution=${execution}&mainline=${is_mainline}" \
  -H 'accept: application/json' \
  -H 'Content-Type: application/json' \
  -d @results.json)

http_status=$(echo "$response" | grep "HTTP_STATUS" | awk -F':' '{print $2}')
response_body=$(echo "$response" | sed '/HTTP_STATUS/d')

# We want to throw an error if the data was not successfully submitted
if [ "$http_status" -ne 200 ]; then
  echo "Error: Received HTTP status $http_status"
  echo "Response Body: $response_body"
  exit 1
fi

echo "Response Body: $response_body"
echo "HTTP Status: $http_status"
```
@@ -0,0 +1,15 @@ (new file)

```bash
#!/usr/bin/bash

set -eux

export OUTPUT_FILE="results.json"

# Install django-mongodb-backend
/opt/python/3.10/bin/python3 -m venv venv
. venv/bin/activate

python -m pip install -U pip
pip install -e .

python .evergreen/run_perf_test.py
mv tests/performance/$OUTPUT_FILE $OUTPUT_FILE
mv tests/performance/report.json report.json
```

Review thread on lines +7 to +9 (the venv setup):
- Contributor: NIT: …
- Author: This I copied over from the existing …
- Collaborator: We're going to bring in drivers-evergreen-tools anyway for QE
@@ -0,0 +1,55 @@ (new file)

```python
import json
import logging
import os
import shlex
import subprocess
import sys
from datetime import datetime

LOGGER = logging.getLogger("test")
logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s")
OUTPUT_FILE = os.environ.get("OUTPUT_FILE")


def handle_perf(start_time: datetime):
    end_time = datetime.now()
    elapsed_secs = (end_time - start_time).total_seconds()
    with open(OUTPUT_FILE) as fid:  # noqa: PTH123
        results = json.load(fid)
    LOGGER.info("results.json:\n%s", json.dumps(results, indent=2))

    results = {
        "status": "PASS",
        "exit_code": 0,
        "test_file": "BenchMarkTests",
        "start": int(start_time.timestamp()),
        "end": int(end_time.timestamp()),
        "elapsed": elapsed_secs,
    }
    report = {"failures": 0, "results": [results]}

    LOGGER.info("report.json\n%s", json.dumps(report, indent=2))

    with open("report.json", "w", newline="\n") as fid:  # noqa: PTH123
        json.dump(report, fid)


def run_command(cmd: str | list[str], **kwargs) -> None:
    if isinstance(cmd, list):
        cmd = " ".join(cmd)
    LOGGER.info("Running command '%s'...", cmd)
    kwargs.setdefault("check", True)
    try:
        subprocess.run(shlex.split(cmd), **kwargs)  # noqa: PLW1510, S603
    except subprocess.CalledProcessError as e:
        LOGGER.error(e.output)
        LOGGER.error(str(e))
        sys.exit(e.returncode)
    LOGGER.info("Running command '%s'... done.", cmd)


os.chdir("tests/performance")

start_time = datetime.now()
run_command(["python manage.py test"])
handle_perf(start_time)
```

Review thread on the datetime-based duration calculation in handle_perf:
- Contributor: For duration calculations, it's better practice to use …
- Author: This is copied from how PyMongo handles it for consistency. I believe we use it there for ease of the different ways we use the timestamps.
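The reviewer's suggestion above is cut off in this capture, but the usual alternative for duration measurement is a monotonic clock, which is unaffected by system clock adjustments; wall-clock timestamps are still needed for the report's `start`/`end` fields. A minimal sketch of that approach (an assumption about the suggestion, not part of this PR):

```python
# Hypothetical variant of the timing logic: time.monotonic() for the elapsed
# measurement, wall-clock datetimes only for the report's start/end fields.
import subprocess
import time
from datetime import datetime

start_time = datetime.now()    # report "start" field
start_mono = time.monotonic()  # monotonic reference for the duration

subprocess.run(["python", "manage.py", "test"], check=True)

end_time = datetime.now()                      # report "end" field
elapsed_secs = time.monotonic() - start_mono   # immune to system clock changes
print(f"elapsed: {elapsed_secs:.2f}s ({start_time} -> {end_time})")
```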
@@ -0,0 +1,23 @@ (new file)

```python
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""

import os
import sys


def main():
    """Run administrative tasks."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "perftest.settings")
    try:
        from django.core.management import execute_from_command_line  # noqa: PLC0415
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
```
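`manage.py` points `DJANGO_SETTINGS_MODULE` at `perftest.settings`, which is not included in this capture. A minimal hypothetical sketch of such a settings module, assuming django-mongodb-backend's documented `ENGINE` string and the `MONGODB_URI` expansion used elsewhere in this PR (the app label and database name are placeholders):

```python
# perftest/settings.py -- hypothetical sketch, not part of this diff.
import os

SECRET_KEY = "perf-tests-only"  # placeholder, test-only value
USE_TZ = True

INSTALLED_APPS = [
    "benchmarks",  # assumption: whichever app holds the benchmark models/tests
]

DATABASES = {
    "default": {
        "ENGINE": "django_mongodb_backend",  # the backend's documented engine
        "NAME": "perftest",                  # placeholder database name
        "HOST": os.environ.get("MONGODB_URI", "mongodb://localhost:27017"),
    }
}

# django-mongodb-backend models use ObjectId primary keys by default.
DEFAULT_AUTO_FIELD = "django_mongodb_backend.fields.ObjectIdAutoField"
```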