From 41aa93fc2dd5b8cf5e2a55991af6f3832f279304 Mon Sep 17 00:00:00 2001 From: "sezen.leblay" Date: Mon, 10 Feb 2025 09:50:03 +0100 Subject: [PATCH] Extend vulnerability location data with class MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit re-enable aws integrations tests (#3733) feat(tests/test_data_integrity): update test_datadog_external_env for Go v1.72.0 and forward (#3961) Activate ruff rules on tests/ folder (#3999) [python] Avoid passing global tracer to pin in weblog apps (#4004) All classes must declare feature ids (#4003) Extend mypy scope (#4002) Onboarding: bug marker profiling (#4005) Docker SSI: fix scenario (#4006) [ruby] Enable IP blocking tests for Ruby (#3937) Activate ruff rules on tests/ folder (#4007) [nodejs] remove auto login event skip (#3998) Email HTML Injection detection in IAST Java (#3906) Co-authored-by: Mario Vidal Domínguez <60353145+Mariovido@users.noreply.github.com> Add test to check absence of client computed stats (#3812) [java] Skip payara/CROSSED_TRACING_LIBRARIES/prod (#4009) Add GraphQL error extension tests (#3986) Co-authored-by: William Conti Use prod agent, dev agent broke lot of tracers (#4011) update xpassing baggage tests for unimplemented languages (#3773) [NodeJS] skip failing baggage tests (#4015) [python] fix 500 errors in sql queries (#3997) Activate ruff rules on tests/ folder (#4010) Hotfix Fix fuzzer [Nodejs] Enable untrusted deserialization stack trace test for Node.js (#3995) [python] use main again for dev branch (#4008) Co-authored-by: erikayasuda <153395705+erikayasuda@users.noreply.github.com> Co-authored-by: Charles de Beauchesne Revert agent dev fix (#4013) [PHP] Enable rasp telemetry tests for PHP (#3972) [skipci] Update CODEOWNERS for static files (#4012) [Java] Enable more easy wins (#4018) [java] Bump GraalVM system test to JDK 22 (#4001) [NodeJS] skip more failing baggage tests (#4021) [Debugger] Update dotnet Exception Replay tests (#3974) Test multiple rasp during one request (#3989) Add test for location extended data (#3978) Fix APPSEC_NO_STATS scenario name (#4019) Avoid false XPASS on APPSEC_WAF_TELEMETRY (#4029) [java] Enable Test_Blocking_strip_response_headers in some variants (#4033) [java] Remove some outdated manifest entries (#4039) [java] Fix xpass for Test_SecurityEvents_Appsec_Metastruct_Disabled (#4038) Consolidate remote config tests into same directory/file (#4031) [python] use last patch version of python for django weblogs (#4025) crashtracking: assert si_signo is set to 11 (#4023) --- .github/CODEOWNERS | 2 +- .github/actions/get_target_branch/action.yaml | 6 +- .github/workflows/ci.yml | 2 + .github/workflows/run-end-to-end.yml | 10 +- .gitlab-ci.yml | 6 +- .vscode/launch.json | 10 + CHANGELOG.md | 9 + conftest.py | 69 +- docs/weblog/README.md | 11 + manifests/cpp.yml | 1 + manifests/dotnet.yml | 35 +- manifests/golang.yml | 30 + manifests/java.yml | 87 +- manifests/nodejs.yml | 41 +- manifests/php.yml | 36 +- manifests/python.yml | 44 +- manifests/ruby.yml | 46 +- pyproject.toml | 120 +- requirements.txt | 2 + tests/apm_tracing_e2e/test_otel.py | 8 +- tests/apm_tracing_e2e/test_single_span.py | 13 +- tests/apm_tracing_e2e/test_smoke.py | 5 +- .../api_security/test_api_security_rc.py | 10 +- .../api_security/test_apisec_sampling.py | 11 +- tests/appsec/api_security/test_schemas.py | 33 +- tests/appsec/iast/sink/test_code_injection.py | 18 +- .../iast/sink/test_command_injection.py | 16 +- .../iast/sink/test_email_html_injection.py | 20 +- 
.../iast/sink/test_hardcoded_passwords.py | 40 +- .../iast/sink/test_hardcoded_secrets.py | 43 +- .../appsec/iast/sink/test_header_injection.py | 16 +- .../iast/sink/test_hsts_missing_header.py | 16 +- .../iast/sink/test_insecure_auth_protocol.py | 23 +- .../appsec/iast/sink/test_insecure_cookie.py | 16 +- tests/appsec/iast/sink/test_ldap_injection.py | 16 +- .../iast/sink/test_no_httponly_cookie.py | 16 +- .../iast/sink/test_no_samesite_cookie.py | 16 +- .../iast/sink/test_nosql_mongodb_injection.py | 17 +- tests/appsec/iast/sink/test_path_traversal.py | 16 +- .../iast/sink/test_reflection_injection.py | 16 +- tests/appsec/iast/sink/test_sql_injection.py | 21 +- tests/appsec/iast/sink/test_ssrf.py | 19 +- .../iast/sink/test_template_injection.py | 18 +- .../sink/test_trust_boundary_violation.py | 19 +- .../sink/test_untrusted_deserialization.py | 18 +- .../iast/sink/test_unvalidated_redirect.py | 34 +- .../sink/test_unvalidated_redirect_forward.py | 18 +- tests/appsec/iast/sink/test_weak_cipher.py | 16 +- tests/appsec/iast/sink/test_weak_hash.py | 28 +- .../appsec/iast/sink/test_weak_randomness.py | 16 +- .../iast/sink/test_xcontent_sniffing.py | 16 +- .../appsec/iast/sink/test_xpath_injection.py | 16 +- tests/appsec/iast/sink/test_xss.py | 16 +- tests/appsec/iast/source/test_cookie_value.py | 2 +- .../iast/source/test_graphql_resolver.py | 2 +- tests/appsec/iast/source/test_header_value.py | 2 +- tests/appsec/iast/source/test_kafka_key.py | 2 +- tests/appsec/iast/source/test_kafka_value.py | 2 +- .../appsec/iast/source/test_parameter_name.py | 8 +- tests/appsec/iast/utils.py | 145 +- .../rasp/rasp_non_blocking_ruleset.json | 282 ++++ tests/appsec/rasp/test_lfi.py | 16 + tests/appsec/rasp/test_sqli.py | 2 +- tests/appsec/rasp/utils.py | 10 +- tests/appsec/test_asm_standalone.py | 47 +- tests/appsec/test_automated_login_events.py | 23 +- tests/appsec/test_blocking_addresses.py | 24 +- tests/appsec/test_conf.py | 14 +- tests/appsec/test_customconf.py | 2 +- tests/appsec/test_event_tracking.py | 4 +- .../appsec/test_ip_blocking_full_denylist.py | 4 +- tests/appsec/test_rate_limiter.py | 7 +- tests/appsec/test_reports.py | 4 +- tests/appsec/test_request_blocking.py | 2 +- tests/appsec/test_runtime_activation.py | 1 - .../test_suspicious_attacker_blocking.py | 2 - tests/appsec/test_traces.py | 19 +- .../test_user_blocking_full_denylist.py | 5 +- tests/appsec/waf/test_addresses.py | 8 +- tests/appsec/waf/test_blocking.py | 6 +- tests/appsec/waf/test_reports.py | 16 +- tests/appsec/waf/test_telemetry.py | 19 +- tests/auto_inject/test_auto_inject_chaos.py | 6 +- .../auto_inject/test_auto_inject_guardrail.py | 1 - tests/auto_inject/test_auto_inject_install.py | 14 +- .../auto_inject/test_blocklist_auto_inject.py | 4 +- tests/auto_inject/utils.py | 20 +- ...eplay_inner_dotnet_snapshots_expected.json | 5 +- ...on_replay_inner_dotnet_spans_expected.json | 8 +- ..._multiframe_dotnet_snapshots_expected.json | 32 +- ...ecursion_20_dotnet_snapshots_expected.json | 1443 ++++++++++++++++- ...ay_recursion_20_dotnet_spans_expected.json | 592 ++++++- ...recursion_3_dotnet_snapshots_expected.json | 201 ++- ...lay_recursion_3_dotnet_spans_expected.json | 79 +- ...recursion_4_dotnet_snapshots_expected.json | 599 +++++++ ...lay_recursion_4_dotnet_spans_expected.json | 297 ++++ ...recursion_5_dotnet_snapshots_expected.json | 362 ++++- ...lay_recursion_5_dotnet_spans_expected.json | 170 +- ...perscissors_dotnet_snapshots_expected.json | 15 +- ...ckpaperscissors_dotnet_spans_expected.json | 6 + 
...play_simple_dotnet_snapshots_expected.json | 3 + ...n_replay_simple_dotnet_spans_expected.json | 2 + .../test_debugger_exception_replay.py | 52 +- .../test_debugger_expression_language.py | 9 +- tests/debugger/test_debugger_pii.py | 2 - tests/debugger/test_debugger_probe_status.py | 2 +- tests/debugger/test_debugger_symdb.py | 5 +- tests/debugger/utils.py | 30 +- tests/docker_ssi/test_docker_ssi.py | 7 +- tests/docker_ssi/test_docker_ssi_crash.py | 3 +- tests/fuzzer/core.py | 18 +- tests/fuzzer/corpus.py | 10 +- tests/fuzzer/main.py | 1 + tests/fuzzer/request_mutator.py | 6 +- tests/fuzzer/tools/__init__.py | 4 +- tests/fuzzer/tools/_tools.py | 8 +- tests/fuzzer/tools/metrics.py | 20 +- .../crossed_integrations/test_kafka.py | 9 +- .../crossed_integrations/test_kinesis.py | 72 +- .../crossed_integrations/test_rabbitmq.py | 13 +- .../crossed_integrations/test_sns_to_sqs.py | 87 +- .../crossed_integrations/test_sqs.py | 77 +- .../integrations/test_db_integrations_sql.py | 18 +- tests/integrations/test_dsm.py | 134 +- tests/integrations/test_inferred_proxy.py | 8 +- tests/integrations/test_open_telemetry.py | 8 +- tests/integrations/utils.py | 36 +- .../test_k8s_init_image_validator.py | 17 +- .../test_k8s_lib_injection_djm.py | 2 +- .../test_k8s_lib_injection_profiling.py | 3 +- tests/otel/test_context_propagation.py | 5 +- tests/otel_tracing_e2e/test_e2e.py | 5 +- tests/parametric/conftest.py | 66 +- tests/parametric/test_128_bit_traceids.py | 4 +- tests/parametric/test_config_consistency.py | 118 +- tests/parametric/test_crashtracking.py | 32 +- .../parametric/test_dynamic_configuration.py | 30 +- tests/parametric/test_headers_b3.py | 2 +- tests/parametric/test_headers_b3multi.py | 6 +- tests/parametric/test_headers_baggage.py | 93 +- tests/parametric/test_headers_datadog.py | 8 +- tests/parametric/test_headers_none.py | 2 +- tests/parametric/test_headers_precedence.py | 8 +- tests/parametric/test_headers_tracecontext.py | 123 +- .../parametric/test_headers_tracestate_dd.py | 21 +- tests/parametric/test_library_tracestats.py | 93 +- .../test_otel_api_interoperability.py | 70 +- tests/parametric/test_otel_env_vars.py | 3 +- tests/parametric/test_otel_span_methods.py | 107 +- .../parametric/test_otel_span_with_baggage.py | 6 +- tests/parametric/test_otel_tracer.py | 8 +- tests/parametric/test_parametric_endpoints.py | 100 +- tests/parametric/test_partial_flushing.py | 20 +- tests/parametric/test_sampling_span_tags.py | 20 +- tests/parametric/test_span_events.py | 1 - tests/parametric/test_span_links.py | 4 +- tests/parametric/test_span_sampling.py | 50 +- tests/parametric/test_telemetry.py | 134 +- tests/parametric/test_trace_sampling.py | 7 +- tests/parametric/test_tracer.py | 55 +- tests/parametric/test_tracer_flare.py | 23 +- tests/perfs/process.py | 2 +- tests/perfs/test_performances.py | 11 +- .../test_remote_configuration.py | 42 +- tests/serverless/span_pointers/utils.py | 9 +- tests/stats/test_stats.py | 15 +- .../static/config_norm_rules.json | 15 +- .../static/python_config_rules.json | 2 +- tests/test_config_consistency.py | 12 +- tests/test_data_integrity.py | 51 +- tests/test_distributed.py | 6 +- tests/test_graphql.py | 29 +- tests/test_identify.py | 6 +- tests/test_library_conf.py | 41 +- tests/test_profiling.py | 4 +- tests/test_remote_config.py | 24 - tests/test_sampling_rates.py | 4 +- tests/test_scrubbing.py | 2 +- tests/test_semantic_conventions.py | 16 +- tests/test_span_events.py | 13 +- tests/test_standard_tags.py | 4 +- tests/test_telemetry.py | 42 +- 
tests/test_the_test/test_conventions.py | 2 +- tests/test_the_test/test_decorators.py | 2 +- tests/test_the_test/test_docker_scenario.py | 2 +- tests/test_the_test/test_features.py | 11 +- tests/test_the_test/test_group_rules.py | 66 + tests/test_the_test/test_json_report.py | 8 +- tests/test_the_test/test_scenario_names.py | 19 + tests/test_the_test/test_scrubber.py | 6 +- tests/test_the_test/test_version.py | 34 +- utils/_context/_scenarios/__init__.py | 30 +- utils/_context/_scenarios/default.py | 1 + utils/_context/_scenarios/docker_ssi.py | 38 +- utils/_context/_scenarios/endtoend.py | 16 +- utils/_context/_scenarios/integrations.py | 27 +- utils/_context/_scenarios/parametric.py | 17 +- utils/_context/containers.py | 72 +- utils/_context/library_version.py | 2 +- utils/_decorators.py | 2 +- utils/_features.py | 29 +- utils/_remote_config.py | 15 +- utils/_weblog.py | 57 +- utils/build/build_python_base_images.sh | 12 +- .../Controllers/ExceptionReplayController.cs | 8 + .../dotnet/weblog/Endpoints/DsmEndpoint.cs | 28 +- .../weblog/Endpoints/MessagingEndpoints.cs | 29 +- utils/build/docker/java/akka-http/pom.xml | 20 + utils/build/docker/java/iast-common/pom.xml | 24 + .../iast/utils/EmailExamples.java | 31 + .../iast/utils/mock/MockTransport.java | 26 + .../build/docker/java/jersey-grizzly2/pom.xml | 20 + utils/build/docker/java/parametric/pom.xml | 20 + utils/build/docker/java/play/pom.xml | 20 + utils/build/docker/java/ratpack/pom.xml | 20 + .../build/docker/java/resteasy-netty3/pom.xml | 21 +- .../java/spring-boot-3-native.Dockerfile | 2 +- .../docker/java/spring-boot-3-native/pom.xml | 20 + utils/build/docker/java/spring-boot/pom.xml | 20 + .../system_tests/springboot/App.java | 24 +- .../system_tests/springboot/AppSecIast.java | 16 + .../springboot/aws/KinesisConnector.java | 16 +- .../springboot/aws/SnsConnector.java | 16 +- utils/build/docker/java/vertx3/pom.xml | 20 + utils/build/docker/java/vertx4/pom.xml | 20 + .../docker/java_otel/spring-boot/pom.xml | 20 + utils/build/docker/nodejs/express/graphql.js | 43 +- .../integrations/messaging/aws/kinesis.js | 10 +- .../integrations/messaging/aws/shared.js | 4 + .../express/integrations/messaging/aws/sns.js | 27 +- .../express/integrations/messaging/aws/sqs.js | 18 +- .../build/docker/php/common/rasp/multiple.php | 6 + .../build/docker/python/django-poc.Dockerfile | 2 + .../docker/python/django-py3.13.Dockerfile | 2 +- .../python/django-py3.13.base.Dockerfile | 6 +- utils/build/docker/python/django/app/urls.py | 24 +- utils/build/docker/python/fastapi/main.py | 4 +- .../build/docker/python/flask-poc.Dockerfile | 3 +- .../docker/python/flask-poc.base.Dockerfile | 4 +- utils/build/docker/python/flask/app.py | 74 +- .../integrations/messaging/aws/kinesis.py | 8 +- .../flask/integrations/messaging/aws/sns.py | 50 +- .../flask/integrations/messaging/aws/sqs.py | 35 +- .../flask/integrations/messaging/rabbitmq.py | 6 +- .../parametric/apm_test_client/__main__.py | 5 +- .../parametric/apm_test_client/server.py | 16 +- .../build/docker/python/python3.12.Dockerfile | 2 +- .../docker/python/python3.12.base.Dockerfile | 6 +- .../build/docker/python/uds-flask.Dockerfile | 2 +- .../app/graphql/system_test_schema.rb | 11 +- utils/interfaces/_core.py | 34 +- utils/interfaces/_library/core.py | 8 +- utils/interfaces/_logs.py | 2 +- utils/interfaces/_test_agent.py | 2 +- utils/interfaces/schemas/serve_doc.py | 2 +- utils/otel_validators/validator_log.py | 1 + utils/parametric/_library_client.py | 16 +- utils/proxy/_deserializer.py | 2 +- 
utils/proxy/core.py | 4 +- utils/proxy/scrubber.py | 16 +- utils/scripts/check_version.sh | 3 +- utils/scripts/compute-workflow-parameters.py | 10 +- utils/scripts/compute_impacted_scenario.py | 4 +- utils/scripts/extract_appsec_waf_rules.py | 2 +- utils/scripts/get-change-log.py | 2 +- utils/scripts/get-image-list.py | 10 +- utils/scripts/get-nightly-logs.py | 4 +- utils/scripts/get-workflow-summary.py | 2 +- utils/scripts/grep-nightly-logs.py | 13 +- utils/scripts/load-binary.sh | 2 +- utils/scripts/markdown_logs.py | 4 +- utils/tools.py | 44 +- utils/virtual_machine/vm_logger.py | 5 +- 273 files changed, 7427 insertions(+), 2023 deletions(-) create mode 100644 tests/appsec/rasp/rasp_non_blocking_ruleset.json create mode 100644 tests/debugger/approvals/exception_replay_recursion_4_dotnet_snapshots_expected.json create mode 100644 tests/debugger/approvals/exception_replay_recursion_4_dotnet_spans_expected.json delete mode 100644 tests/test_remote_config.py create mode 100644 tests/test_the_test/test_group_rules.py create mode 100644 tests/test_the_test/test_scenario_names.py create mode 100644 utils/build/docker/java/iast-common/src/main/java/com/datadoghq/system_tests/iast/utils/EmailExamples.java create mode 100644 utils/build/docker/java/iast-common/src/main/java/com/datadoghq/system_tests/iast/utils/mock/MockTransport.java create mode 100644 utils/build/docker/nodejs/express/integrations/messaging/aws/shared.js create mode 100644 utils/build/docker/php/common/rasp/multiple.php diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 913ce1c6d1..ddd6c49e3b 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -30,4 +30,4 @@ /manifests/ruby.yml @DataDog/ruby-guild @DataDog/asm-ruby # Allows everyone to easily make changes -/tests/telemetry_intake/static/ @DataDog/apm-ecosystems +/tests/telemetry_intake/static/ @DataDog/apm-sdk diff --git a/.github/actions/get_target_branch/action.yaml b/.github/actions/get_target_branch/action.yaml index 884944a2bc..8432bdb341 100644 --- a/.github/actions/get_target_branch/action.yaml +++ b/.github/actions/get_target_branch/action.yaml @@ -16,6 +16,10 @@ runs: id: extract shell: bash run: | - branch=$(echo "${{ inputs.text }}" | grep -ioP '\[(?:java|dotnet|python|ruby|php|golang|cpp|agent|nodejs)@[^]]+(?=\])' | tr -d '[:space:]' || true) + branch=$(echo "${INPUTS_TEXT}" | grep -ioP '\[(?:java|dotnet|python|ruby|php|golang|cpp|agent|nodejs)@[^]]+(?=\])' | tr -d '[:space:]' || true) echo "target-branch=${branch#*@}" >> $GITHUB_OUTPUT + + # the preferred approach to handling untrusted input is to set the value of the expression to an intermediate environment variable + env: + INPUTS_TEXT: ${{ inputs.text }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4c36850207..c2c697c832 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -74,8 +74,10 @@ jobs: run: ./utils/scripts/load-binary.sh ${{ matrix.library }} env: TARGET_BRANCH: "${{ steps.get-target-branch.outputs.target-branch }}" + - name: Get agent artifact run: ./utils/scripts/load-binary.sh agent + # ### appsec-event-rules is now a private repo. The GH_TOKEN provided can't read private repos. 
# ### skipping this, waiting for a proper solution # - name: Load WAF rules diff --git a/.github/workflows/run-end-to-end.yml b/.github/workflows/run-end-to-end.yml index 9f5353e325..1be3d48de8 100644 --- a/.github/workflows/run-end-to-end.yml +++ b/.github/workflows/run-end-to-end.yml @@ -113,6 +113,9 @@ jobs: - name: Run APPSEC_STANDALONE_V2 scenario if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"APPSEC_STANDALONE_V2"') run: ./run.sh APPSEC_STANDALONE_V2 + - name: Run APPSEC_NO_STATS scenario + if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"APPSEC_NO_STATS"') + run: ./run.sh APPSEC_NO_STATS - name: Run IAST_STANDALONE scenario if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"IAST_STANDALONE"') run: ./run.sh IAST_STANDALONE @@ -135,7 +138,7 @@ jobs: if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"IPV6"') && inputs.library != 'ruby' run: ./run.sh IPV6 - name: Run CROSSED_TRACING_LIBRARIES scenario - if: always() && steps.build.outcome == 'success' && matrix.weblog != 'python3.12' && matrix.weblog != 'django-py3.13' && contains(inputs.scenarios, '"CROSSED_TRACING_LIBRARIES"') + if: always() && steps.build.outcome == 'success' && matrix.weblog != 'python3.12' && matrix.weblog != 'django-py3.13' && matrix.weblog != 'spring-boot-payara' && contains(inputs.scenarios, '"CROSSED_TRACING_LIBRARIES"') # python 3.13 issue : APMAPI-1096 run: ./run.sh CROSSED_TRACING_LIBRARIES env: @@ -203,7 +206,7 @@ jobs: if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"REMOTE_CONFIG_MOCKED_BACKEND_ASM_DD_NOCACHE"') run: ./run.sh REMOTE_CONFIG_MOCKED_BACKEND_ASM_DD_NOCACHE - name: Run AGENT_NOT_SUPPORTING_SPAN_EVENTS scenario - if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, 'AGENT_NOT_SUPPORTING_SPAN_EVENTS') && (inputs.library != 'ruby' || matrix.weblog == 'rack') + if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, 'AGENT_NOT_SUPPORTING_SPAN_EVENTS') run: ./run.sh AGENT_NOT_SUPPORTING_SPAN_EVENTS - name: Run APPSEC_MISSING_RULES scenario # C++ 1.2.0 freeze when the rules file is missing @@ -269,6 +272,9 @@ jobs: - name: Run APPSEC_RASP scenario if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"APPSEC_RASP"') run: ./run.sh APPSEC_RASP + - name: Run APPSEC_RASP_NON_BLOCKING scenario + if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"APPSEC_RASP_NON_BLOCKING"') + run: ./run.sh APPSEC_RASP_NON_BLOCKING - name: Run APPSEC_META_STRUCT_DISABLED scenario if: always() && steps.build.outcome == 'success' && contains(inputs.scenarios, '"APPSEC_META_STRUCT_DISABLED"') run: ./run.sh APPSEC_META_STRUCT_DISABLED diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ddd0f77ce9..d6607abc3f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -295,15 +295,15 @@ x_compute_python_aws_scenarios: parallel: matrix: - ONBOARDING_FILTER_WEBLOG: [test-app-python] - SCENARIO: [HOST_AUTO_INJECTION_INSTALL_SCRIPT] + SCENARIO: [HOST_AUTO_INJECTION_INSTALL_SCRIPT, HOST_AUTO_INJECTION_INSTALL_SCRIPT_PROFILING] - ONBOARDING_FILTER_WEBLOG: [test-app-python-container,test-app-python-alpine] - SCENARIO: [ CONTAINER_AUTO_INJECTION_INSTALL_SCRIPT] + SCENARIO: [CONTAINER_AUTO_INJECTION_INSTALL_SCRIPT, CONTAINER_AUTO_INJECTION_INSTALL_SCRIPT_PROFILING] - ONBOARDING_FILTER_WEBLOG: [ test-app-python, test-app-python-container, test-app-python-alpine ] - SCENARIO: 
[INSTALLER_AUTO_INJECTION] + SCENARIO: [INSTALLER_AUTO_INJECTION, SIMPLE_AUTO_INJECTION_PROFILING] - ONBOARDING_FILTER_WEBLOG: [test-app-python] SCENARIO: [CHAOS_INSTALLER_AUTO_INJECTION] - ONBOARDING_FILTER_WEBLOG: [test-app-python-multicontainer,test-app-python-multialpine] diff --git a/.vscode/launch.json b/.vscode/launch.json index fb0ae9c432..d21f17e9b4 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -164,6 +164,16 @@ "justMyCode": true, "python": "${workspaceFolder}/venv/bin/python" }, + { + "name": "Replay APPSEC_STANDALONE scenario", + "type": "debugpy", + "request": "launch", + "module": "pytest", + "args": ["-S", "APPSEC_STANDALONE", "-p", "no:warnings", "--replay"], + "console": "integratedTerminal", + "justMyCode": true, + "python": "${workspaceFolder}/venv/bin/python" + }, { "name": "Python: Current File", "type": "python", diff --git a/CHANGELOG.md b/CHANGELOG.md index 03a3c2228c..ccc340c53b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,15 @@ All notable changes to this project will be documented in this file. +### 2025-01 (179 PR merged) + +* 2025-01-20 [Deserialize JSON in multipart](https://github.com/DataDog/system-tests/pull/3854) by @cbeauchesne +* 2025-01-14 [[python] add new python weblog: django-py3.13](https://github.com/DataDog/system-tests/pull/3798) by @christophe-papazian +* 2025-01-09 [Removes CircleCI job](https://github.com/DataDog/system-tests/pull/3792) by @cbeauchesne +* 2025-01-03 [Add an option that skip all tests if scenario contains only xfail/skip](https://github.com/DataDog/system-tests/pull/3768) by @cbeauchesne +* 2025-01-27 [Try to get TARGET_BRANCH from PR's title](https://github.com/DataDog/system-tests/pull/3675) by @iunanua +* 2025-01-30 [[golang] new orchestrion go weblog](https://github.com/DataDog/system-tests/pull/3555) by @eliottness +* 2025-01-16 [Add tests for Service Extension (Envoy External Processing)](https://github.com/DataDog/system-tests/pull/3377) by @e-n-0 ### 2024-12 (138 PR merged) diff --git a/conftest.py b/conftest.py index 02374b3d44..3ba2c65522 100644 --- a/conftest.py +++ b/conftest.py @@ -15,7 +15,7 @@ from manifests.parser.core import load as load_manifests from utils import context -from utils._context._scenarios import scenarios +from utils._context._scenarios import scenarios, Scenario from utils.tools import logger from utils.scripts.junit_report import junit_modifyreport from utils._context.library_version import LibraryVersion @@ -150,16 +150,20 @@ def pytest_configure(config) -> None: config.option.skip_empty_scenario = True # First of all, we must get the current scenario + + current_scenario: Scenario | None = None + for name in dir(scenarios): if name.upper() == config.option.scenario: - context.scenario = getattr(scenarios, name) + current_scenario = getattr(scenarios, name) break - if context.scenario is None: + if current_scenario is not None: + current_scenario.pytest_configure(config) + context.scenario = current_scenario + else: pytest.exit(f"Scenario {config.option.scenario} does not exist", 1) - context.scenario.pytest_configure(config) - if not config.option.replay and not config.option.collectonly: config.option.json_report_file = f"{context.scenario.host_log_folder}/report.json" config.option.xmlpath = f"{context.scenario.host_log_folder}/reportJunit.xml" @@ -184,11 +188,8 @@ def pytest_sessionstart(session) -> None: # called when each test item is collected def _collect_item_metadata(item): - result = { - "details": None, - "testDeclaration": None, - "features": 
[marker.kwargs["feature_id"] for marker in item.iter_markers("features")], - } + details: str | None = None + test_declaration: str | None = None # get the reason from skip before xfail markers = [*item.iter_markers("skip"), *item.iter_markers("skipif"), *item.iter_markers("xfail")] @@ -197,32 +198,36 @@ def _collect_item_metadata(item): if skip_reason is not None: # if any irrelevant declaration exists, it is the one we need to expose - if skip_reason.startswith("irrelevant") or result["details"] is None: - result["details"] = skip_reason - - if result["details"]: - logger.debug(f"{item.nodeid} => {result['details']} => skipped") - - if result["details"].startswith("irrelevant"): - result["testDeclaration"] = "irrelevant" - elif result["details"].startswith("flaky"): - result["testDeclaration"] = "flaky" - elif result["details"].startswith("bug"): - result["testDeclaration"] = "bug" - elif result["details"].startswith("incomplete_test_app"): - result["testDeclaration"] = "incompleteTestApp" - elif result["details"].startswith("missing_feature"): - result["testDeclaration"] = "notImplemented" - elif "got empty parameter set" in result["details"]: + if skip_reason.startswith("irrelevant") or details is None: + details = skip_reason + + if details is not None: + logger.debug(f"{item.nodeid} => {details} => skipped") + + if details.startswith("irrelevant"): + test_declaration = "irrelevant" + elif details.startswith("flaky"): + test_declaration = "flaky" + elif details.startswith("bug"): + test_declaration = "bug" + elif details.startswith("incomplete_test_app"): + test_declaration = "incompleteTestApp" + elif details.startswith("missing_feature"): + test_declaration = "notImplemented" + elif "got empty parameter set" in details: # Case of a test with no parameters. Onboarding: we removed the parameter/machine with excludedBranches logger.info(f"No parameters found for ${item.nodeid}") else: - raise ValueError(f"Unexpected test declaration for {item.nodeid} : {result['details']}") + raise ValueError(f"Unexpected test declaration for {item.nodeid} : {details}") - return result + return { + "details": details, + "testDeclaration": test_declaration, + "features": [marker.kwargs["feature_id"] for marker in item.iter_markers("features")], + } -def _get_skip_reason_from_marker(marker): +def _get_skip_reason_from_marker(marker) -> str | None: if marker.name == "skipif": if all(marker.args): return marker.kwargs.get("reason", "") @@ -443,7 +448,7 @@ def pytest_runtest_call(item) -> None: @pytest.hookimpl(optionalhook=True) -def pytest_json_runtest_metadata(item, call) -> None: +def pytest_json_runtest_metadata(item, call) -> None | dict: if call.when != "setup": return {} @@ -521,7 +526,7 @@ def export_feature_parity_dashboard(session, data) -> None: json.dump(result, f, indent=2) -def convert_test_to_feature_parity_model(test) -> dict: +def convert_test_to_feature_parity_model(test) -> dict | None: result = { "path": test["nodeid"], "lineNumber": test["lineno"], diff --git a/docs/weblog/README.md b/docs/weblog/README.md index 97a80fe6a2..fc5bac8417 100644 --- a/docs/weblog/README.md +++ b/docs/weblog/README.md @@ -768,6 +768,17 @@ Examples: - `GET`: `/rasp/ssrf?user_id="' OR 1 = 1 --"` - `POST`: `{"user_id": "' OR 1 = 1 --"}` +### \[GET\] /rasp/multiple +The idea of this endpoint is to have a single endpoint where multiple RASP operations take place. All of them will generate a MATCH on the WAF but none of them will block.
The goal of this endpoint is to verify that the `rasp.rule.match` telemetry entry is updated properly. While this seems easy, the WAF requires that data given on `call` be passed as ephemeral and not as persistent. + +To make the test easier, the operations used here need to generate LFI matches. The request will have two GET parameters (`file1`, `file2`), each containing a path that is used as the parameter of the chosen LFI function. Then there will be another call to the LFI function with a hardcoded parameter `'../etc/passwd'`. This makes `rasp.rule.match` equal to 3. A code example looks like: + +``` +lfi_operation($request->get('file1')) +lfi_operation($request->get('file2')) +lfi_operation('../etc/passwd') // This one is hardcoded +``` + ### GET /dsm/inject This endpoint is used to validate that DSM context injection injects the correct encoding into a headers carrier. diff --git a/manifests/cpp.yml b/manifests/cpp.yml index bf39d60399..6b31fc0f05 100644 --- a/manifests/cpp.yml +++ b/manifests/cpp.yml @@ -160,6 +160,7 @@ tests/: Test_Config_TraceEnabled: v1.0.1.dev Test_Config_TraceLogDirectory: missing_feature Test_Config_UnifiedServiceTagging: v1.0.1.dev + Test_Stable_Config_Default: missing_feature test_crashtracking.py: missing_feature test_dynamic_configuration.py: TestDynamicConfigV1_EmptyServiceTargets: missing_feature diff --git a/manifests/dotnet.yml b/manifests/dotnet.yml index f7e127cf72..f7f08cc5be 100644 --- a/manifests/dotnet.yml +++ b/manifests/dotnet.yml @@ -31,19 +31,24 @@ tests/: sink/: test_code_injection.py: TestCodeInjection: missing_feature + TestCodeInjection_ExtendedLocation: missing_feature TestCodeInjection_StackTrace: missing_feature test_command_injection.py: TestCommandInjection: v2.28.0 + TestCommandInjection_ExtendedLocation: missing_feature TestCommandInjection_StackTrace: missing_feature test_email_html_injection.py: TestEmailHtmlInjection: v3.2.0 + TestEmailHtmlInjection_ExtendedLocation: missing_feature TestEmailHtmlInjection_StackTrace: missing_feature test_hardcoded_passwords.py: Test_HardcodedPasswords: missing_feature + Test_HardcodedPasswords_ExtendedLocation: missing_feature Test_HardcodedPasswords_StackTrace: missing_feature test_hardcoded_secrets.py: Test_HardcodedSecrets: missing_feature Test_HardcodedSecretsExtended: missing_feature + Test_HardcodedSecrets_ExtendedLocation: missing_feature Test_HardcodedSecrets_StackTrace: missing_feature test_header_injection.py: TestHeaderInjection: v2.46.0 @@ -51,78 +56,102 @@ tests/: TestHeaderInjectionExclusionContentEncoding: missing_feature TestHeaderInjectionExclusionPragma: missing_feature TestHeaderInjectionExclusionTransferEncoding: missing_feature + TestHeaderInjection_ExtendedLocation: missing_feature TestHeaderInjection_StackTrace: missing_feature test_hsts_missing_header.py: Test_HstsMissingHeader: v2.44.0 + Test_HstsMissingHeader_ExtendedLocation: missing_feature Test_HstsMissingHeader_StackTrace: missing_feature test_insecure_auth_protocol.py: Test_InsecureAuthProtocol: v2.49.0 + Test_InsecureAuthProtocol_ExtendedLocation: missing_feature Test_InsecureAuthProtocol_StackTrace: missing_feature test_insecure_cookie.py: TestInsecureCookie: v2.39.0 TestInsecureCookieNameFilter: missing_feature + TestInsecureCookie_ExtendedLocation: missing_feature TestInsecureCookie_StackTrace: missing_feature test_ldap_injection.py: TestLDAPInjection: v2.36.0 + TestLDAPInjection_ExtendedLocation: missing_feature TestLDAPInjection_StackTrace: missing_feature test_no_httponly_cookie.py:
TestNoHttponlyCookie: v2.39.0 TestNoHttponlyCookieNameFilter: missing_feature + TestNoHttponlyCookie_ExtendedLocation: missing_feature TestNoHttponlyCookie_StackTrace: missing_feature test_no_samesite_cookie.py: TestNoSamesiteCookie: v2.39.0 TestNoSamesiteCookieNameFilter: missing_feature + TestNoSamesiteCookie_ExtendedLocation: missing_feature TestNoSamesiteCookie_StackTrace: missing_feature test_nosql_mongodb_injection.py: TestNoSqlMongodbInjection: v2.47.0 + TestNoSqlMongodbInjection_ExtendedLocation: missing_feature TestNoSqlMongodbInjection_StackTrace: missing_feature test_path_traversal.py: TestPathTraversal: v2.31.0 + TestPathTraversal_ExtendedLocation: missing_feature TestPathTraversal_StackTrace: missing_feature test_reflection_injection.py: TestReflectionInjection: v2.48.0 + TestReflectionInjection_ExtendedLocation: missing_feature TestReflectionInjection_StackTrace: missing_feature test_sql_injection.py: TestSqlInjection: - '*': v2.23.0 + '*': v2.23.0 + TestSqlInjection_ExtendedLocation: missing_feature TestSqlInjection_StackTrace: missing_feature test_ssrf.py: TestSSRF: v2.36.0 + TestSSRF_ExtendedLocation: missing_feature TestSSRF_StackTrace: missing_feature test_template_injection.py: TestTemplateInjection: missing_feature + TestTemplateInjection_ExtendedLocation: missing_feature test_trust_boundary_violation.py: Test_TrustBoundaryViolation: v2.43.0 + Test_TrustBoundaryViolation_ExtendedLocation: missing_feature Test_TrustBoundaryViolation_StackTrace: missing_feature test_untrusted_deserialization.py: TestUntrustedDeserialization: missing_feature + TestUntrustedDeserialization_ExtendedLocation: missing_feature TestUntrustedDeserialization_StackTrace: missing_feature test_unvalidated_redirect.py: TestUnvalidatedHeader: v2.44.0 + TestUnvalidatedHeader_ExtendedLocation: missing_feature TestUnvalidatedHeader_StackTrace: missing_feature TestUnvalidatedRedirect: v2.44.0 + TestUnvalidatedRedirect_ExtendedLocation: missing_feature TestUnvalidatedRedirect_StackTrace: missing_feature test_unvalidated_redirect_forward.py: TestUnvalidatedForward: missing_feature + TestUnvalidatedForward_ExtendedLocation: missing_feature TestUnvalidatedForward_StackTrace: missing_feature test_weak_cipher.py: TestWeakCipher: v2.24.0 + TestWeakCipher_ExtendedLocation: missing_feature TestWeakCipher_StackTrace: missing_feature test_weak_hash.py: TestDeduplication: v2.24.0 TestWeakHash: v2.24.0 + TestWeakHash_ExtendedLocation: missing_feature TestWeakHash_StackTrace: missing_feature test_weak_randomness.py: TestWeakRandomness: v2.39.0 + TestWeakRandomness_ExtendedLocation: missing_feature TestWeakRandomness_StackTrace: missing_feature test_xcontent_sniffing.py: Test_XContentSniffing: missing_feature + Test_XContentSniffing_ExtendedLocation: missing_feature Test_XContentSniffing_StackTrace: missing_feature test_xpath_injection.py: TestXPathInjection: v2.47.0 + TestXPathInjection_ExtendedLocation: missing_feature TestXPathInjection_StackTrace: missing_feature test_xss.py: TestXSS: missing_feature + TestXSS_ExtendedLocation: missing_feature TestXSS_StackTrace: missing_feature source/: test_body.py: @@ -183,6 +212,7 @@ tests/: Test_Lfi_Rules_Version: v3.7.0 Test_Lfi_StackTrace: v2.51.0 Test_Lfi_Telemetry: v2.51.0 + Test_Lfi_Telemetry_Multiple_Exploits: missing_feature Test_Lfi_UrlQuery: v2.51.0 Test_Lfi_Waf_Version: v3.4.1 test_shi.py: @@ -241,7 +271,7 @@ tests/: test_blocking.py: Test_Blocking: v2.27.0 Test_Blocking_strip_response_headers: missing_feature - Test_CustomBlockingResponse: missing_feature + 
Test_CustomBlockingResponse: v3.10.0 test_custom_rules.py: Test_CustomRules: v2.30.0 test_exclusions.py: @@ -421,6 +451,7 @@ tests/: Test_Config_TraceEnabled: v3.3.0 Test_Config_TraceLogDirectory: v3.3.0 Test_Config_UnifiedServiceTagging: v3.3.0 + Test_Stable_Config_Default: missing_feature test_crashtracking.py: Test_Crashtracking: v3.2.0 test_dynamic_configuration.py: diff --git a/manifests/golang.yml b/manifests/golang.yml index 5b8c24e73a..e81fd345bb 100644 --- a/manifests/golang.yml +++ b/manifests/golang.yml @@ -42,19 +42,24 @@ tests/: sink/: test_code_injection.py: TestCodeInjection: missing_feature + TestCodeInjection_ExtendedLocation: missing_feature TestCodeInjection_StackTrace: missing_feature test_command_injection.py: TestCommandInjection: missing_feature + TestCommandInjection_ExtendedLocation: missing_feature TestCommandInjection_StackTrace: missing_feature test_email_html_injection.py: TestEmailHtmlInjection: missing_feature + TestEmailHtmlInjection_ExtendedLocation: missing_feature TestEmailHtmlInjection_StackTrace: missing_feature test_hardcoded_passwords.py: Test_HardcodedPasswords: missing_feature + Test_HardcodedPasswords_ExtendedLocation: missing_feature Test_HardcodedPasswords_StackTrace: missing_feature test_hardcoded_secrets.py: Test_HardcodedSecrets: missing_feature Test_HardcodedSecretsExtended: missing_feature + Test_HardcodedSecrets_ExtendedLocation: missing_feature Test_HardcodedSecrets_StackTrace: missing_feature test_header_injection.py: TestHeaderInjection: missing_feature @@ -62,78 +67,102 @@ tests/: TestHeaderInjectionExclusionContentEncoding: missing_feature TestHeaderInjectionExclusionPragma: missing_feature TestHeaderInjectionExclusionTransferEncoding: missing_feature + TestHeaderInjection_ExtendedLocation: missing_feature TestHeaderInjection_StackTrace: missing_feature test_hsts_missing_header.py: Test_HstsMissingHeader: missing_feature + Test_HstsMissingHeader_ExtendedLocation: missing_feature Test_HstsMissingHeader_StackTrace: missing_feature test_insecure_auth_protocol.py: Test_InsecureAuthProtocol: missing_feature + Test_InsecureAuthProtocol_ExtendedLocation: missing_feature Test_InsecureAuthProtocol_StackTrace: missing_feature test_insecure_cookie.py: TestInsecureCookie: missing_feature TestInsecureCookieNameFilter: missing_feature + TestInsecureCookie_ExtendedLocation: missing_feature TestInsecureCookie_StackTrace: missing_feature test_ldap_injection.py: TestLDAPInjection: missing_feature + TestLDAPInjection_ExtendedLocation: missing_feature TestLDAPInjection_StackTrace: missing_feature test_no_httponly_cookie.py: TestNoHttponlyCookie: missing_feature TestNoHttponlyCookieNameFilter: missing_feature + TestNoHttponlyCookie_ExtendedLocation: missing_feature TestNoHttponlyCookie_StackTrace: missing_feature test_no_samesite_cookie.py: TestNoSamesiteCookie: missing_feature TestNoSamesiteCookieNameFilter: missing_feature + TestNoSamesiteCookie_ExtendedLocation: missing_feature TestNoSamesiteCookie_StackTrace: missing_feature test_nosql_mongodb_injection.py: TestNoSqlMongodbInjection: missing_feature + TestNoSqlMongodbInjection_ExtendedLocation: missing_feature TestNoSqlMongodbInjection_StackTrace: missing_feature test_path_traversal.py: TestPathTraversal: missing_feature + TestPathTraversal_ExtendedLocation: missing_feature TestPathTraversal_StackTrace: missing_feature test_reflection_injection.py: TestReflectionInjection: missing_feature + TestReflectionInjection_ExtendedLocation: missing_feature TestReflectionInjection_StackTrace: missing_feature 
test_sql_injection.py: TestSqlInjection: missing_feature + TestSqlInjection_ExtendedLocation: missing_feature TestSqlInjection_StackTrace: missing_feature test_ssrf.py: TestSSRF: missing_feature + TestSSRF_ExtendedLocation: missing_feature TestSSRF_StackTrace: missing_feature test_template_injection.py: TestTemplateInjection: missing_feature + TestTemplateInjection_ExtendedLocation: missing_feature test_trust_boundary_violation.py: Test_TrustBoundaryViolation: missing_feature + Test_TrustBoundaryViolation_ExtendedLocation: missing_feature Test_TrustBoundaryViolation_StackTrace: missing_feature test_untrusted_deserialization.py: TestUntrustedDeserialization: missing_feature + TestUntrustedDeserialization_ExtendedLocation: missing_feature TestUntrustedDeserialization_StackTrace: missing_feature test_unvalidated_redirect.py: TestUnvalidatedHeader: missing_feature + TestUnvalidatedHeader_ExtendedLocation: missing_feature TestUnvalidatedHeader_StackTrace: missing_feature TestUnvalidatedRedirect: missing_feature + TestUnvalidatedRedirect_ExtendedLocation: missing_feature TestUnvalidatedRedirect_StackTrace: missing_feature test_unvalidated_redirect_forward.py: TestUnvalidatedForward: missing_feature + TestUnvalidatedForward_ExtendedLocation: missing_feature TestUnvalidatedForward_StackTrace: missing_feature test_weak_cipher.py: TestWeakCipher: missing_feature + TestWeakCipher_ExtendedLocation: missing_feature TestWeakCipher_StackTrace: missing_feature test_weak_hash.py: TestDeduplication: missing_feature TestWeakHash: missing_feature + TestWeakHash_ExtendedLocation: missing_feature TestWeakHash_StackTrace: missing_feature test_weak_randomness.py: TestWeakRandomness: missing_feature + TestWeakRandomness_ExtendedLocation: missing_feature TestWeakRandomness_StackTrace: missing_feature test_xcontent_sniffing.py: Test_XContentSniffing: missing_feature + Test_XContentSniffing_ExtendedLocation: missing_feature Test_XContentSniffing_StackTrace: missing_feature test_xpath_injection.py: TestXPathInjection: missing_feature + TestXPathInjection_ExtendedLocation: missing_feature TestXPathInjection_StackTrace: missing_feature test_xss.py: TestXSS: '*': missing_feature + TestXSS_ExtendedLocation: missing_feature TestXSS_StackTrace: missing_feature source/: test_body.py: @@ -553,6 +582,7 @@ tests/: Test_Config_TraceEnabled: v1.67.0 Test_Config_TraceLogDirectory: v1.70.0 Test_Config_UnifiedServiceTagging: bug (APMAPI-746) + Test_Stable_Config_Default: missing_feature test_crashtracking.py: missing_feature test_dynamic_configuration.py: TestDynamicConfigSamplingRules: v1.64.0 diff --git a/manifests/java.yml b/manifests/java.yml index 8d1aab57d0..ea29a775be 100644 --- a/manifests/java.yml +++ b/manifests/java.yml @@ -61,6 +61,7 @@ tests/: sink/: test_code_injection.py: TestCodeInjection: missing_feature + TestCodeInjection_ExtendedLocation: missing_feature TestCodeInjection_StackTrace: missing_feature test_command_injection.py: TestCommandInjection: @@ -73,16 +74,37 @@ tests/: spring-boot-3-native: missing_feature (GraalVM. Tracing support only) vertx3: v1.12.0 vertx4: v1.12.0 + TestCommandInjection_ExtendedLocation: missing_feature TestCommandInjection_StackTrace: '*': v1.43.0 play: missing_feature ratpack: missing_feature spring-boot-3-native: missing_feature (GraalVM. 
Tracing support only) test_email_html_injection.py: - TestEmailHtmlInjection: missing_feature - TestEmailHtmlInjection_StackTrace: missing_feature + TestEmailHtmlInjection: + '*': v1.47.0 + akka-http: missing_feature (No endpoint implemented) + jersey-grizzly2: missing_feature (No endpoint implemented) + play: missing_feature (No endpoint implemented) + ratpack: missing_feature (No endpoint implemented) + resteasy-netty3: missing_feature (No endpoint implemented) + spring-boot-3-native: missing_feature (No endpoint implemented) + vertx3: missing_feature (No endpoint implemented) + vertx4: missing_feature (No endpoint implemented) + TestEmailHtmlInjection_ExtendedLocation: missing_feature + TestEmailHtmlInjection_StackTrace: + '*': v1.47.0 + akka-http: missing_feature (No endpoint implemented) + jersey-grizzly2: missing_feature (No endpoint implemented) + play: missing_feature (No endpoint implemented) + ratpack: missing_feature (No endpoint implemented) + resteasy-netty3: missing_feature (No endpoint implemented) + spring-boot-3-native: missing_feature (No endpoint implemented) + vertx3: missing_feature (No endpoint implemented) + vertx4: missing_feature (No endpoint implemented) test_hardcoded_passwords.py: Test_HardcodedPasswords: missing_feature + Test_HardcodedPasswords_ExtendedLocation: missing_feature Test_HardcodedPasswords_StackTrace: missing_feature test_hardcoded_secrets.py: Test_HardcodedSecrets: @@ -95,6 +117,7 @@ tests/: spring-boot-wildfly: v1.29.0 uds-spring-boot: v1.29.0 Test_HardcodedSecretsExtended: missing_feature + Test_HardcodedSecrets_ExtendedLocation: missing_feature Test_HardcodedSecrets_StackTrace: irrelevant (not expected to have a stack trace) test_header_injection.py: TestHeaderInjection: @@ -110,6 +133,7 @@ tests/: TestHeaderInjectionExclusionContentEncoding: missing_feature TestHeaderInjectionExclusionPragma: missing_feature TestHeaderInjectionExclusionTransferEncoding: missing_feature + TestHeaderInjection_ExtendedLocation: missing_feature TestHeaderInjection_StackTrace: '*': missing_feature spring-boot: v1.43.0 @@ -131,6 +155,7 @@ tests/: spring-boot-openliberty: bug (APPSEC-51483) vertx3: missing_feature vertx4: missing_feature + Test_HstsMissingHeader_ExtendedLocation: missing_feature Test_HstsMissingHeader_StackTrace: irrelevant (not expected to have a stack trace) test_insecure_auth_protocol.py: Test_InsecureAuthProtocol: @@ -140,6 +165,7 @@ tests/: ratpack: missing_feature spring-boot-3-native: missing_feature (GraalVM. Tracing support only) spring-boot-openliberty: bug (APPSEC-54981) + Test_InsecureAuthProtocol_ExtendedLocation: missing_feature Test_InsecureAuthProtocol_StackTrace: '*': v1.43.0 akka-http: missing_feature @@ -155,6 +181,7 @@ tests/: ratpack: missing_feature spring-boot-3-native: missing_feature TestInsecureCookieNameFilter: missing_feature + TestInsecureCookie_ExtendedLocation: missing_feature TestInsecureCookie_StackTrace: '*': v1.43.0 akka-http: missing_feature @@ -172,6 +199,7 @@ tests/: spring-boot-3-native: missing_feature (GraalVM. 
Tracing support only) vertx3: v1.12.0 vertx4: v1.12.0 + TestLDAPInjection_ExtendedLocation: missing_feature TestLDAPInjection_StackTrace: '*': v1.43.0 play: missing_feature (endpoint not implemented) @@ -185,6 +213,7 @@ tests/: ratpack: missing_feature spring-boot-3-native: missing_feature TestNoHttponlyCookieNameFilter: missing_feature + TestNoHttponlyCookie_ExtendedLocation: missing_feature TestNoHttponlyCookie_StackTrace: '*': v1.43.0 akka-http: missing_feature @@ -199,6 +228,7 @@ tests/: ratpack: missing_feature spring-boot-3-native: missing_feature TestNoSamesiteCookieNameFilter: missing_feature + TestNoSamesiteCookie_ExtendedLocation: missing_feature TestNoSamesiteCookie_StackTrace: '*': v1.43.0 akka-http: missing_feature @@ -207,6 +237,7 @@ tests/: spring-boot-3-native: missing_feature test_nosql_mongodb_injection.py: TestNoSqlMongodbInjection: missing_feature + TestNoSqlMongodbInjection_ExtendedLocation: missing_feature TestNoSqlMongodbInjection_StackTrace: missing_feature test_path_traversal.py: TestPathTraversal: @@ -218,6 +249,7 @@ tests/: resteasy-netty3: v1.11.0 spring-boot-3-native: missing_feature (GraalVM. Tracing support only) vertx3: v1.12.0 + TestPathTraversal_ExtendedLocation: missing_feature TestPathTraversal_StackTrace: '*': v1.43.0 play: missing_feature @@ -230,6 +262,7 @@ tests/: play: missing_feature ratpack: missing_feature spring-boot-3-native: missing_feature (GraalVM. Tracing support only) + TestReflectionInjection_ExtendedLocation: missing_feature TestReflectionInjection_StackTrace: '*': v1.43.0 akka-http: missing_feature @@ -246,6 +279,7 @@ tests/: resteasy-netty3: v1.11.0 spring-boot-3-native: missing_feature (GraalVM. Tracing support only) vertx3: v1.12.0 + TestSqlInjection_ExtendedLocation: missing_feature TestSqlInjection_StackTrace: '*': v1.43.0 play: missing_feature @@ -259,6 +293,7 @@ tests/: ratpack: missing_feature (No endpoint implemented) spring-boot-3-native: missing_feature (GraalVM. Tracing support only) vertx4: missing_feature (No endpoint implemented) + TestSSRF_ExtendedLocation: missing_feature TestSSRF_StackTrace: '*': v1.43.0 akka-http: missing_feature (No endpoint implemented) @@ -268,6 +303,7 @@ tests/: vertx4: missing_feature (No endpoint implemented) test_template_injection.py: TestTemplateInjection: missing_feature + TestTemplateInjection_ExtendedLocation: missing_feature test_trust_boundary_violation.py: Test_TrustBoundaryViolation: '*': v1.22.0 @@ -279,6 +315,7 @@ tests/: spring-boot-3-native: missing_feature (GraalVM. Tracing support only) vertx3: missing_feature vertx4: missing_feature + Test_TrustBoundaryViolation_ExtendedLocation: missing_feature Test_TrustBoundaryViolation_StackTrace: '*': v1.43.0 akka-http: missing_feature @@ -300,6 +337,7 @@ tests/: spring-boot-3-native: missing_feature (No endpoint implemented) vertx3: missing_feature (No endpoint implemented) vertx4: missing_feature (No endpoint implemented) + TestUntrustedDeserialization_ExtendedLocation: missing_feature TestUntrustedDeserialization_StackTrace: '*': v1.43.0 akka-http: missing_feature (No endpoint implemented) @@ -320,6 +358,7 @@ tests/: spring-boot-jetty: v1.17.0 vertx3: v1.16.0 vertx4: v1.17.0 + TestUnvalidatedHeader_ExtendedLocation: missing_feature TestUnvalidatedHeader_StackTrace: '*': v1.43.0 akka-http: missing_feature @@ -334,6 +373,7 @@ tests/: spring-boot-3-native: missing_feature (GraalVM. 
Tracing support only) spring-boot-jetty: v1.17.0 vertx4: v1.17.0 + TestUnvalidatedRedirect_ExtendedLocation: missing_feature TestUnvalidatedRedirect_StackTrace: '*': v1.43.0 akka-http: missing_feature @@ -351,6 +391,7 @@ tests/: spring-boot-3-native: missing_feature (GraalVM. Tracing support only) vertx3: v1.16.0 vertx4: v1.17.0 + TestUnvalidatedForward_ExtendedLocation: missing_feature TestUnvalidatedForward_StackTrace: '*': v1.43.0 akka-http: irrelevant (No forward) @@ -364,6 +405,7 @@ tests/: '*': v0.108.0 play: missing_feature (no endpoint) spring-boot-3-native: missing_feature (GraalVM. Tracing support only) + TestWeakCipher_ExtendedLocation: missing_feature TestWeakCipher_StackTrace: '*': v1.43.0 play: missing_feature (no endpoint) @@ -377,6 +419,7 @@ tests/: '*': v0.108.0 play: missing_feature (no endpoint) spring-boot-3-native: missing_feature (GraalVM. Tracing support only) + TestWeakHash_ExtendedLocation: missing_feature TestWeakHash_StackTrace: '*': v1.43.0 play: missing_feature (no endpoint) @@ -386,6 +429,7 @@ tests/: '*': v1.15.0 play: missing_feature (no endpoint) spring-boot-3-native: missing_feature (GraalVM. Tracing support only) + TestWeakRandomness_ExtendedLocation: missing_feature TestWeakRandomness_StackTrace: '*': v1.43.0 play: missing_feature (no endpoint) @@ -402,6 +446,7 @@ tests/: spring-boot-openliberty: bug (APPSEC-54981) vertx3: missing_feature vertx4: missing_feature + Test_XContentSniffing_ExtendedLocation: missing_feature Test_XContentSniffing_StackTrace: irrelevant (not expected to have a stack trace) test_xpath_injection.py: TestXPathInjection: @@ -409,6 +454,7 @@ tests/: play: missing_feature ratpack: missing_feature spring-boot-3-native: missing_feature + TestXPathInjection_ExtendedLocation: missing_feature TestXPathInjection_StackTrace: '*': v1.43.0 play: missing_feature @@ -425,6 +471,7 @@ tests/: spring-boot-3-native: missing_feature (GraalVM. Tracing support only) vertx3: missing_feature vertx4: missing_feature + TestXSS_ExtendedLocation: missing_feature TestXSS_StackTrace: '*': v1.43.0 akka-http: missing_feature @@ -678,6 +725,7 @@ tests/: Test_Lfi_Telemetry: '*': v1.40.0 spring-boot-3-native: missing_feature (GraalVM. Tracing support only) + Test_Lfi_Telemetry_Multiple_Exploits: missing_feature Test_Lfi_UrlQuery: '*': v1.40.0 spring-boot-3-native: missing_feature (GraalVM. Tracing support only) @@ -847,7 +895,7 @@ tests/: ratpack: v0.99.0 spring-boot-3-native: missing_feature (GraalVM. Tracing support only) vertx3: v0.99.0 - vertx4: bug (APPSEC-54983) + vertx4: v1.47.0 Test_BodyRaw: '*': missing_feature akka-http: v1.22.0 @@ -883,10 +931,10 @@ tests/: Test_PathParams: '*': v0.95.1 akka-http: missing_feature (unclear how to implement; matching doesn't happen in one go) - jersey-grizzly2: missing_feature + jersey-grizzly2: v1.15.0 play: v1.22.0 ratpack: v0.99.0 - resteasy-netty3: missing_feature + resteasy-netty3: v1.15.0 spring-boot-3-native: missing_feature (GraalVM. Tracing support only) vertx3: v0.99.0 Test_ResponseStatus: @@ -926,7 +974,11 @@ tests/: spring-boot-3-native: missing_feature spring-boot-openliberty: v1.3.0 vertx3: v1.7.0 - Test_Blocking_strip_response_headers: missing_feature + Test_Blocking_strip_response_headers: + '*': missing_feature + akka-http: v1.22.0 + play: v1.22.0 + spring-boot-3-native: missing_feature (GraalVM. Tracing support only) Test_CustomBlockingResponse: '*': v1.11.0 akka-http: v1.22.0 @@ -1023,6 +1075,7 @@ tests/: akka-http: v1.22.0 spring-boot-3-native: missing_feature (GraalVM. 
Tracing support only) test_asm_standalone.py: + Test_AppSecStandalone_NotEnabled: missing_feature Test_AppSecStandalone_UpstreamPropagation: '*': v1.36.0 spring-boot-3-native: missing_feature (GraalVM. Tracing support only) @@ -1332,9 +1385,9 @@ tests/: ratpack: missing_feature (endpoint not implemented) resteasy-netty3: missing_feature (endpoint not implemented) spring-boot-3-native: irrelevant (GraalVM. Tracing support only) - spring-boot-jetty: missing_feature (missing addresses) - spring-boot-undertow: missing_feature (missing addresses) - spring-boot-wildfly: missing_feature (missing addresses) + spring-boot-jetty: v1.42.0 + spring-boot-undertow: v1.42.0 + spring-boot-wildfly: v1.42.0 vertx3: v1.46.0 vertx4: v1.46.0 Test_Fingerprinting_Session_Capability: @@ -1357,7 +1410,12 @@ tests/: akka-http: v1.22.0 spring-boot-3-native: missing_feature (GraalVM. Tracing support only) Test_StandardizationBlockMode: missing_feature - test_metastruct.py: missing_feature + test_metastruct.py: + Test_SecurityEvents_Appsec_Metastruct_Disabled: + spring-boot-3-native: missing_feature (GraalVM. Tracing support only) + Test_SecurityEvents_Appsec_Metastruct_Enabled: missing_feature (APPSEC-4766) + Test_SecurityEvents_Iast_Metastruct_Disabled: missing_feature (APPSEC-4766) + Test_SecurityEvents_Iast_Metastruct_Enabled: missing_feature (APPSEC-4766) test_rate_limiter.py: Test_Main: akka-http: v1.22.0 @@ -1366,11 +1424,9 @@ tests/: test_remote_config_rule_changes.py: Test_BlockingActionChangesWithRemoteConfig: '*': v1.42.0 - play: bug (APPSEC-55789) spring-boot-3-native: missing_feature (GraalVM. Tracing support only) Test_UpdateRuleFileWithRemoteConfig: '*': v1.42.0 - play: bug (APPSEC-55791) spring-boot-3-native: missing_feature (GraalVM. Tracing support only) test_reports.py: Test_AttackTimestamp: @@ -1424,7 +1480,6 @@ tests/: test_suspicious_attacker_blocking.py: Test_Suspicious_Attacker_Blocking: '*': v1.39.0 - play: bug (APPSEC-54986) spring-boot-3-native: missing_feature (GraalVM. Tracing support only) test_traces.py: Test_AppSecEventSpanTags: @@ -1617,6 +1672,7 @@ tests/: Test_Config_TraceEnabled: v1.39.0 Test_Config_TraceLogDirectory: missing_feature Test_Config_UnifiedServiceTagging: v1.41.1 + Test_Stable_Config_Default: missing_feature test_crashtracking.py: Test_Crashtracking: v1.38.0 test_dynamic_configuration.py: @@ -1760,7 +1816,9 @@ tests/: Test_ExtractBehavior_Ignore: incomplete_test_app (/make_distant_call endpoint is not correctly implemented) Test_ExtractBehavior_Restart: incomplete_test_app (/make_distant_call endpoint is not correctly implemented) Test_ExtractBehavior_Restart_With_Extract_First: incomplete_test_app (/make_distant_call endpoint is not correctly implemented) - Test_HeaderTags: missing_feature + Test_HeaderTags: + '*': v1.35.0 + spring-boot-3-native: missing_feature (GraalVM. 
Tracing support only) Test_HeaderTags_Colon_Leading: v0.102.0 Test_HeaderTags_Colon_Trailing: v0.102.0 Test_HeaderTags_DynamicConfig: missing_feature @@ -1803,6 +1861,7 @@ tests/: '*': missing_feature spring-boot: v0.102.0 spring-boot-jetty: v0.102.0 + spring-boot-wildfly: v0.102.0 Test_StandardTagsStatusCode: v0.102.0 Test_StandardTagsUrl: v0.107.1 Test_StandardTagsUserAgent: v0.107.1 diff --git a/manifests/nodejs.yml b/manifests/nodejs.yml index a3f8c1de90..7e3a43902e 100644 --- a/manifests/nodejs.yml +++ b/manifests/nodejs.yml @@ -98,6 +98,7 @@ tests/: TestCodeInjection: '*': *ref_5_20_0 nextjs: missing_feature + TestCodeInjection_ExtendedLocation: missing_feature TestCodeInjection_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -105,16 +106,19 @@ tests/: TestCommandInjection: '*': *ref_3_11_0 nextjs: missing_feature + TestCommandInjection_ExtendedLocation: missing_feature TestCommandInjection_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature test_email_html_injection.py: TestEmailHtmlInjection: missing_feature + TestEmailHtmlInjection_ExtendedLocation: missing_feature TestEmailHtmlInjection_StackTrace: missing_feature test_hardcoded_passwords.py: Test_HardcodedPasswords: '*': *ref_5_13_0 nextjs: missing_feature + Test_HardcodedPasswords_ExtendedLocation: missing_feature Test_HardcodedPasswords_StackTrace: missing_feature test_hardcoded_secrets.py: Test_HardcodedSecrets: @@ -123,6 +127,7 @@ tests/: Test_HardcodedSecretsExtended: '*': *ref_5_11_0 nextjs: missing_feature + Test_HardcodedSecrets_ExtendedLocation: missing_feature Test_HardcodedSecrets_StackTrace: missing_feature test_header_injection.py: TestHeaderInjection: @@ -144,6 +149,7 @@ tests/: '*': *ref_5_26_0 express5: *ref_5_29_0 # test uses querystring nextjs: missing_feature + TestHeaderInjection_ExtendedLocation: missing_feature TestHeaderInjection_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -151,9 +157,11 @@ tests/: Test_HstsMissingHeader: '*': *ref_4_8_0 nextjs: missing_feature + Test_HstsMissingHeader_ExtendedLocation: missing_feature Test_HstsMissingHeader_StackTrace: missing_feature test_insecure_auth_protocol.py: Test_InsecureAuthProtocol: missing_feature + Test_InsecureAuthProtocol_ExtendedLocation: missing_feature Test_InsecureAuthProtocol_StackTrace: missing_feature test_insecure_cookie.py: TestInsecureCookie: @@ -162,6 +170,7 @@ tests/: TestInsecureCookieNameFilter: '*': *ref_5_24_0 nextjs: missing_feature + TestInsecureCookie_ExtendedLocation: missing_feature TestInsecureCookie_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -169,6 +178,7 @@ tests/: TestLDAPInjection: '*': *ref_4_1_0 nextjs: missing_feature + TestLDAPInjection_ExtendedLocation: missing_feature TestLDAPInjection_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -179,6 +189,7 @@ tests/: TestNoHttponlyCookieNameFilter: '*': *ref_5_24_0 nextjs: missing_feature + TestNoHttponlyCookie_ExtendedLocation: missing_feature TestNoHttponlyCookie_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -189,6 +200,7 @@ tests/: TestNoSamesiteCookieNameFilter: '*': *ref_5_24_0 nextjs: missing_feature + TestNoSamesiteCookie_ExtendedLocation: missing_feature TestNoSamesiteCookie_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -196,6 +208,7 @@ tests/: TestNoSqlMongodbInjection: '*': *ref_4_17_0 nextjs: missing_feature + TestNoSqlMongodbInjection_ExtendedLocation: missing_feature TestNoSqlMongodbInjection_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -203,16 +216,19 @@ tests/: TestPathTraversal: '*': *ref_3_19_0 
nextjs: missing_feature + TestPathTraversal_ExtendedLocation: missing_feature TestPathTraversal_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature test_reflection_injection.py: TestReflectionInjection: missing_feature + TestReflectionInjection_ExtendedLocation: missing_feature TestReflectionInjection_StackTrace: missing_feature test_sql_injection.py: TestSqlInjection: '*': *ref_3_11_0 nextjs: missing_feature + TestSqlInjection_ExtendedLocation: missing_feature TestSqlInjection_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -220,6 +236,7 @@ tests/: TestSSRF: '*': *ref_4_1_0 nextjs: missing_feature + TestSSRF_ExtendedLocation: missing_feature TestSSRF_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -227,34 +244,43 @@ tests/: TestTemplateInjection: '*': *ref_5_26_0 nextjs: missing_feature + TestTemplateInjection_ExtendedLocation: missing_feature test_trust_boundary_violation.py: Test_TrustBoundaryViolation: missing_feature + Test_TrustBoundaryViolation_ExtendedLocation: missing_feature Test_TrustBoundaryViolation_StackTrace: missing_feature test_untrusted_deserialization.py: TestUntrustedDeserialization: '*': *ref_5_32_0 nextjs: missing_feature - TestUntrustedDeserialization_StackTrace: missing_feature + TestUntrustedDeserialization_ExtendedLocation: missing_feature + TestUntrustedDeserialization_StackTrace: + '*': *ref_5_32_0 + nextjs: missing_feature test_unvalidated_redirect.py: TestUnvalidatedHeader: '*': *ref_4_3_0 nextjs: missing_feature + TestUnvalidatedHeader_ExtendedLocation: missing_feature TestUnvalidatedHeader_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature TestUnvalidatedRedirect: '*': *ref_4_3_0 nextjs: missing_feature + TestUnvalidatedRedirect_ExtendedLocation: missing_feature TestUnvalidatedRedirect_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature test_unvalidated_redirect_forward.py: TestUnvalidatedForward: missing_feature + TestUnvalidatedForward_ExtendedLocation: missing_feature TestUnvalidatedForward_StackTrace: missing_feature test_weak_cipher.py: TestWeakCipher: '*': *ref_3_6_0 nextjs: missing_feature + TestWeakCipher_ExtendedLocation: missing_feature TestWeakCipher_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -265,6 +291,7 @@ tests/: TestWeakHash: '*': *ref_3_11_0 nextjs: missing_feature + TestWeakHash_ExtendedLocation: missing_feature TestWeakHash_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -272,6 +299,7 @@ tests/: TestWeakRandomness: '*': *ref_5_1_0 nextjs: missing_feature + TestWeakRandomness_ExtendedLocation: missing_feature TestWeakRandomness_StackTrace: '*': *ref_5_33_0 nextjs: missing_feature @@ -279,12 +307,15 @@ tests/: Test_XContentSniffing: '*': *ref_4_8_0 nextjs: missing_feature + Test_XContentSniffing_ExtendedLocation: missing_feature Test_XContentSniffing_StackTrace: missing_feature test_xpath_injection.py: TestXPathInjection: missing_feature + TestXPathInjection_ExtendedLocation: missing_feature TestXPathInjection_StackTrace: missing_feature test_xss.py: TestXSS: missing_feature + TestXSS_ExtendedLocation: missing_feature TestXSS_StackTrace: missing_feature source/: test_body.py: @@ -386,6 +417,7 @@ tests/: Test_Lfi_Rules_Version: *ref_5_26_0 Test_Lfi_StackTrace: *ref_5_24_0 Test_Lfi_Telemetry: *ref_5_24_0 + Test_Lfi_Telemetry_Multiple_Exploits: missing_feature Test_Lfi_UrlQuery: '*': *ref_5_24_0 express5: *ref_5_29_0 @@ -809,6 +841,7 @@ tests/: Test_Config_TraceEnabled: *ref_4_3_0 Test_Config_TraceLogDirectory: missing_feature Test_Config_UnifiedServiceTagging: *ref_5_25_0 + Test_Stable_Config_Default: 
missing_feature test_crashtracking.py: Test_Crashtracking: *ref_5_27_0 test_dynamic_configuration.py: @@ -926,10 +959,8 @@ tests/: nextjs: bug (APMAPI-939) # the nextjs weblog application changes the sampling priority from 1.0 to 2.0 test_graphql.py: Test_GraphQLQueryErrorReporting: - '*': *ref_5_34_0 - express4-typescript: incomplete_test_app (endpoint not implemented) - express5: missing_feature - nextjs: missing_feature + '*': irrelevant + express4: missing_feature test_identify.py: Test_Basic: v2.4.0 Test_Propagate: *ref_3_2_0 diff --git a/manifests/php.yml b/manifests/php.yml index 9800e5def2..fa4f756aa0 100644 --- a/manifests/php.yml +++ b/manifests/php.yml @@ -26,19 +26,24 @@ tests/: sink/: test_code_injection.py: TestCodeInjection: missing_feature + TestCodeInjection_ExtendedLocation: missing_feature TestCodeInjection_StackTrace: missing_feature test_command_injection.py: TestCommandInjection: missing_feature + TestCommandInjection_ExtendedLocation: missing_feature TestCommandInjection_StackTrace: missing_feature test_email_html_injection.py: TestEmailHtmlInjection: missing_feature + TestEmailHtmlInjection_ExtendedLocation: missing_feature TestEmailHtmlInjection_StackTrace: missing_feature test_hardcoded_passwords.py: Test_HardcodedPasswords: missing_feature + Test_HardcodedPasswords_ExtendedLocation: missing_feature Test_HardcodedPasswords_StackTrace: missing_feature test_hardcoded_secrets.py: Test_HardcodedSecrets: missing_feature Test_HardcodedSecretsExtended: missing_feature + Test_HardcodedSecrets_ExtendedLocation: missing_feature Test_HardcodedSecrets_StackTrace: missing_feature test_header_injection.py: TestHeaderInjection: missing_feature @@ -46,78 +51,102 @@ tests/: TestHeaderInjectionExclusionContentEncoding: missing_feature TestHeaderInjectionExclusionPragma: missing_feature TestHeaderInjectionExclusionTransferEncoding: missing_feature + TestHeaderInjection_ExtendedLocation: missing_feature TestHeaderInjection_StackTrace: missing_feature test_hsts_missing_header.py: Test_HstsMissingHeader: missing_feature + Test_HstsMissingHeader_ExtendedLocation: missing_feature Test_HstsMissingHeader_StackTrace: missing_feature test_insecure_auth_protocol.py: Test_InsecureAuthProtocol: missing_feature + Test_InsecureAuthProtocol_ExtendedLocation: missing_feature Test_InsecureAuthProtocol_StackTrace: missing_feature test_insecure_cookie.py: TestInsecureCookie: missing_feature TestInsecureCookieNameFilter: missing_feature + TestInsecureCookie_ExtendedLocation: missing_feature TestInsecureCookie_StackTrace: missing_feature test_ldap_injection.py: TestLDAPInjection: missing_feature + TestLDAPInjection_ExtendedLocation: missing_feature TestLDAPInjection_StackTrace: missing_feature test_no_httponly_cookie.py: TestNoHttponlyCookie: missing_feature TestNoHttponlyCookieNameFilter: missing_feature + TestNoHttponlyCookie_ExtendedLocation: missing_feature TestNoHttponlyCookie_StackTrace: missing_feature test_no_samesite_cookie.py: TestNoSamesiteCookie: missing_feature TestNoSamesiteCookieNameFilter: missing_feature + TestNoSamesiteCookie_ExtendedLocation: missing_feature TestNoSamesiteCookie_StackTrace: missing_feature test_nosql_mongodb_injection.py: TestNoSqlMongodbInjection: missing_feature + TestNoSqlMongodbInjection_ExtendedLocation: missing_feature TestNoSqlMongodbInjection_StackTrace: missing_feature test_path_traversal.py: TestPathTraversal: missing_feature + TestPathTraversal_ExtendedLocation: missing_feature TestPathTraversal_StackTrace: missing_feature test_reflection_injection.py: 
TestReflectionInjection: missing_feature + TestReflectionInjection_ExtendedLocation: missing_feature TestReflectionInjection_StackTrace: missing_feature test_sql_injection.py: TestSqlInjection: missing_feature + TestSqlInjection_ExtendedLocation: missing_feature TestSqlInjection_StackTrace: missing_feature test_ssrf.py: TestSSRF: missing_feature + TestSSRF_ExtendedLocation: missing_feature TestSSRF_StackTrace: missing_feature test_template_injection.py: TestTemplateInjection: missing_feature + TestTemplateInjection_ExtendedLocation: missing_feature test_trust_boundary_violation.py: Test_TrustBoundaryViolation: missing_feature + Test_TrustBoundaryViolation_ExtendedLocation: missing_feature Test_TrustBoundaryViolation_StackTrace: missing_feature test_untrusted_deserialization.py: TestUntrustedDeserialization: missing_feature + TestUntrustedDeserialization_ExtendedLocation: missing_feature TestUntrustedDeserialization_StackTrace: missing_feature test_unvalidated_redirect.py: TestUnvalidatedHeader: missing_feature + TestUnvalidatedHeader_ExtendedLocation: missing_feature TestUnvalidatedHeader_StackTrace: missing_feature TestUnvalidatedRedirect: missing_feature + TestUnvalidatedRedirect_ExtendedLocation: missing_feature TestUnvalidatedRedirect_StackTrace: missing_feature test_unvalidated_redirect_forward.py: TestUnvalidatedForward: missing_feature + TestUnvalidatedForward_ExtendedLocation: missing_feature TestUnvalidatedForward_StackTrace: missing_feature test_weak_cipher.py: TestWeakCipher: missing_feature + TestWeakCipher_ExtendedLocation: missing_feature TestWeakCipher_StackTrace: missing_feature test_weak_hash.py: TestDeduplication: missing_feature TestWeakHash: missing_feature + TestWeakHash_ExtendedLocation: missing_feature TestWeakHash_StackTrace: missing_feature test_weak_randomness.py: TestWeakRandomness: missing_feature + TestWeakRandomness_ExtendedLocation: missing_feature TestWeakRandomness_StackTrace: missing_feature test_xcontent_sniffing.py: Test_XContentSniffing: missing_feature + Test_XContentSniffing_ExtendedLocation: missing_feature Test_XContentSniffing_StackTrace: missing_feature test_xpath_injection.py: TestXPathInjection: missing_feature + TestXPathInjection_ExtendedLocation: missing_feature TestXPathInjection_StackTrace: missing_feature test_xss.py: TestXSS: '*': missing_feature + TestXSS_ExtendedLocation: missing_feature TestXSS_StackTrace: missing_feature source/: test_body.py: @@ -176,7 +205,8 @@ tests/: Test_Lfi_RC_CustomAction: missing_feature # It works but missing this APPSEC-56457 Test_Lfi_Rules_Version: v1.6.2 Test_Lfi_StackTrace: v1.6.2 - Test_Lfi_Telemetry: missing_feature + Test_Lfi_Telemetry: v1.7.0 + Test_Lfi_Telemetry_Multiple_Exploits: v1.7.0 Test_Lfi_UrlQuery: v1.6.2 Test_Lfi_Waf_Version: v1.6.2 test_shi.py: @@ -213,7 +243,7 @@ tests/: Test_Ssrf_Optional_SpanTags: v1.7.0 Test_Ssrf_Rules_Version: v1.7.0 Test_Ssrf_StackTrace: v1.7.0 - Test_Ssrf_Telemetry: missing_feature + Test_Ssrf_Telemetry: v1.7.0 Test_Ssrf_UrlQuery: v1.7.0 Test_Ssrf_Waf_Version: v1.7.0 waf/: @@ -263,6 +293,7 @@ tests/: test_telemetry.py: Test_TelemetryMetrics: missing_feature test_asm_standalone.py: + Test_AppSecStandalone_NotEnabled: v1.6.2 Test_AppSecStandalone_UpstreamPropagation: v1.6.0 Test_AppSecStandalone_UpstreamPropagation_V2: missing_feature Test_IastStandalone_UpstreamPropagation: missing_feature @@ -396,6 +427,7 @@ tests/: Test_Config_TraceEnabled: v1.3.0 # Unknown initial version Test_Config_TraceLogDirectory: missing_feature Test_Config_UnifiedServiceTagging: v1.5.0 + 
Test_Stable_Config_Default: missing_feature test_crashtracking.py: Test_Crashtracking: v1.3.0 test_dynamic_configuration.py: diff --git a/manifests/python.yml b/manifests/python.yml index 349ac2565a..cd4475b4cf 100644 --- a/manifests/python.yml +++ b/manifests/python.yml @@ -51,21 +51,26 @@ tests/: sink/: test_code_injection.py: TestCodeInjection: v2.20.0 + TestCodeInjection_ExtendedLocation: missing_feature TestCodeInjection_StackTrace: v2.20.0 test_command_injection.py: TestCommandInjection: '*': v2.10.0 fastapi: v2.15.0 + TestCommandInjection_ExtendedLocation: missing_feature TestCommandInjection_StackTrace: v2.19.0.dev test_email_html_injection.py: TestEmailHtmlInjection: missing_feature + TestEmailHtmlInjection_ExtendedLocation: missing_feature TestEmailHtmlInjection_StackTrace: missing_feature test_hardcoded_passwords.py: Test_HardcodedPasswords: missing_feature + Test_HardcodedPasswords_ExtendedLocation: missing_feature Test_HardcodedPasswords_StackTrace: missing_feature test_hardcoded_secrets.py: Test_HardcodedSecrets: missing_feature Test_HardcodedSecretsExtended: missing_feature + Test_HardcodedSecrets_ExtendedLocation: missing_feature Test_HardcodedSecrets_StackTrace: missing_feature test_header_injection.py: TestHeaderInjection: @@ -75,46 +80,56 @@ tests/: TestHeaderInjectionExclusionContentEncoding: missing_feature TestHeaderInjectionExclusionPragma: missing_feature TestHeaderInjectionExclusionTransferEncoding: missing_feature + TestHeaderInjection_ExtendedLocation: missing_feature TestHeaderInjection_StackTrace: '*': v2.19.0.dev fastapi: v2.20.0.dev test_hsts_missing_header.py: Test_HstsMissingHeader: missing_feature + Test_HstsMissingHeader_ExtendedLocation: missing_feature Test_HstsMissingHeader_StackTrace: missing_feature test_insecure_auth_protocol.py: Test_InsecureAuthProtocol: missing_feature + Test_InsecureAuthProtocol_ExtendedLocation: missing_feature Test_InsecureAuthProtocol_StackTrace: missing_feature test_insecure_cookie.py: TestInsecureCookie: '*': v1.19.0 fastapi: v2.16.0-dev TestInsecureCookieNameFilter: missing_feature + TestInsecureCookie_ExtendedLocation: missing_feature TestInsecureCookie_StackTrace: missing_feature test_ldap_injection.py: TestLDAPInjection: missing_feature + TestLDAPInjection_ExtendedLocation: missing_feature TestLDAPInjection_StackTrace: missing_feature test_no_httponly_cookie.py: TestNoHttponlyCookie: '*': v1.19.0 fastapi: v2.16.0-dev TestNoHttponlyCookieNameFilter: missing_feature + TestNoHttponlyCookie_ExtendedLocation: missing_feature TestNoHttponlyCookie_StackTrace: missing_feature test_no_samesite_cookie.py: TestNoSamesiteCookie: '*': v1.19.0 fastapi: v2.16.0-dev TestNoSamesiteCookieNameFilter: missing_feature + TestNoSamesiteCookie_ExtendedLocation: missing_feature TestNoSamesiteCookie_StackTrace: missing_feature test_nosql_mongodb_injection.py: TestNoSqlMongodbInjection: missing_feature + TestNoSqlMongodbInjection_ExtendedLocation: missing_feature TestNoSqlMongodbInjection_StackTrace: missing_feature test_path_traversal.py: TestPathTraversal: '*': v2.10.0 fastapi: v2.15.0 + TestPathTraversal_ExtendedLocation: missing_feature TestPathTraversal_StackTrace: v2.19.0.dev test_reflection_injection.py: TestReflectionInjection: missing_feature + TestReflectionInjection_ExtendedLocation: missing_feature TestReflectionInjection_StackTrace: missing_feature test_sql_injection.py: TestSqlInjection: @@ -123,51 +138,65 @@ tests/: flask-poc: v1.18.0 pylons: missing_feature python3.12: v1.18.0 + TestSqlInjection_ExtendedLocation: missing_feature 
TestSqlInjection_StackTrace: v2.19.0.dev test_ssrf.py: TestSSRF: '*': v2.10.0 fastapi: v2.15.0 + TestSSRF_ExtendedLocation: missing_feature TestSSRF_StackTrace: v2.19.0.dev test_template_injection.py: TestTemplateInjection: missing_feature + TestTemplateInjection_ExtendedLocation: missing_feature test_trust_boundary_violation.py: Test_TrustBoundaryViolation: missing_feature + Test_TrustBoundaryViolation_ExtendedLocation: missing_feature Test_TrustBoundaryViolation_StackTrace: missing_feature test_untrusted_deserialization.py: TestUntrustedDeserialization: missing_feature + TestUntrustedDeserialization_ExtendedLocation: missing_feature TestUntrustedDeserialization_StackTrace: missing_feature test_unvalidated_redirect.py: TestUnvalidatedHeader: missing_feature + TestUnvalidatedHeader_ExtendedLocation: missing_feature TestUnvalidatedHeader_StackTrace: missing_feature TestUnvalidatedRedirect: missing_feature + TestUnvalidatedRedirect_ExtendedLocation: missing_feature TestUnvalidatedRedirect_StackTrace: missing_feature test_unvalidated_redirect_forward.py: TestUnvalidatedForward: missing_feature + TestUnvalidatedForward_ExtendedLocation: missing_feature TestUnvalidatedForward_StackTrace: missing_feature test_weak_cipher.py: TestWeakCipher: '*': v1.18.0 fastapi: v2.15.0 + TestWeakCipher_ExtendedLocation: missing_feature TestWeakCipher_StackTrace: v2.19.0.dev test_weak_hash.py: TestDeduplication: '*': v1.18.0 TestWeakHash: '*': v1.18.0 + TestWeakHash_ExtendedLocation: missing_feature TestWeakHash_StackTrace: v2.19.0.dev test_weak_randomness.py: TestWeakRandomness: '*': v2.0.0 + TestWeakRandomness_ExtendedLocation: missing_feature TestWeakRandomness_StackTrace: v2.19.0.dev test_xcontent_sniffing.py: Test_XContentSniffing: missing_feature + Test_XContentSniffing_ExtendedLocation: missing_feature Test_XContentSniffing_StackTrace: missing_feature test_xpath_injection.py: TestXPathInjection: missing_feature + TestXPathInjection_ExtendedLocation: missing_feature TestXPathInjection_StackTrace: missing_feature test_xss.py: TestXSS: missing_feature + TestXSS_ExtendedLocation: missing_feature TestXSS_StackTrace: missing_feature source/: test_body.py: @@ -196,9 +225,9 @@ tests/: TestHeaderValue: '*': v1.18.0 fastapi: v2.13.0 - flask-poc: bug (APPSEC-56329) - uds-flask: bug (APPSEC-56329) - uwsgi-poc: bug (APPSEC-56329) + flask-poc: v3.1.0.dev + uds-flask: v3.1.0.dev + uwsgi-poc: v3.1.0.dev test_kafka_key.py: TestKafkaKey: missing_feature test_kafka_value.py: @@ -209,6 +238,7 @@ tests/: TestParameterName: "*": v2.20.0 fastapi: v2.20.1 + python3.12: bug (APPSEC-56375) test_parameter_value.py: TestParameterValue: '*': v2.9.0 @@ -258,6 +288,7 @@ tests/: Test_Lfi_Rules_Version: v2.18.0.dev Test_Lfi_StackTrace: v2.10.0 Test_Lfi_Telemetry: v2.10.0 + Test_Lfi_Telemetry_Multiple_Exploits: missing_feature Test_Lfi_UrlQuery: v2.10.0 Test_Lfi_Waf_Version: v2.15.0 test_shi.py: @@ -455,8 +486,8 @@ tests/: Test_V3_Login_Events_RC: v2.11.0 test_automated_user_and_session_tracking.py: Test_Automated_Session_Blocking: missing_feature - Test_Automated_User_Blocking: v2.21.0.dev - Test_Automated_User_Tracking: v2.21.0.dev + Test_Automated_User_Blocking: v3.0.0.rc + Test_Automated_User_Tracking: v3.0.0.rc test_blocking_addresses.py: Test_BlockingGraphqlResolvers: missing_feature Test_Blocking_client_ip: @@ -552,7 +583,7 @@ tests/: Test_StandardizationBlockMode: missing_feature test_metastruct.py: Test_SecurityEvents_Appsec_Metastruct_Disabled: irrelevant (no fallback will be implemented) - 
Test_SecurityEvents_Appsec_Metastruct_Enabled: missing_feature + Test_SecurityEvents_Appsec_Metastruct_Enabled: v3.0.0.rc Test_SecurityEvents_Iast_Metastruct_Disabled: irrelevant (no fallback will be implemented) Test_SecurityEvents_Iast_Metastruct_Enabled: missing_feature test_only_python.py: @@ -781,6 +812,7 @@ tests/: Test_Config_TraceEnabled: v2.12.2 Test_Config_TraceLogDirectory: missing_feature Test_Config_UnifiedServiceTagging: v2.12.2 + Test_Stable_Config_Default: missing_feature test_crashtracking.py: Test_Crashtracking: v2.11.2 test_dynamic_configuration.py: diff --git a/manifests/ruby.yml b/manifests/ruby.yml index 66d7d23936..61f662fcc4 100644 --- a/manifests/ruby.yml +++ b/manifests/ruby.yml @@ -28,19 +28,24 @@ tests/: sink/: test_code_injection.py: TestCodeInjection: missing_feature + TestCodeInjection_ExtendedLocation: missing_feature TestCodeInjection_StackTrace: missing_feature test_command_injection.py: TestCommandInjection: missing_feature + TestCommandInjection_ExtendedLocation: missing_feature TestCommandInjection_StackTrace: missing_feature test_email_html_injection.py: TestEmailHtmlInjection: missing_feature + TestEmailHtmlInjection_ExtendedLocation: missing_feature TestEmailHtmlInjection_StackTrace: missing_feature test_hardcoded_passwords.py: Test_HardcodedPasswords: missing_feature + Test_HardcodedPasswords_ExtendedLocation: missing_feature Test_HardcodedPasswords_StackTrace: missing_feature test_hardcoded_secrets.py: Test_HardcodedSecrets: missing_feature Test_HardcodedSecretsExtended: missing_feature + Test_HardcodedSecrets_ExtendedLocation: missing_feature Test_HardcodedSecrets_StackTrace: missing_feature test_header_injection.py: TestHeaderInjection: missing_feature @@ -48,77 +53,101 @@ tests/: TestHeaderInjectionExclusionContentEncoding: missing_feature TestHeaderInjectionExclusionPragma: missing_feature TestHeaderInjectionExclusionTransferEncoding: missing_feature + TestHeaderInjection_ExtendedLocation: missing_feature TestHeaderInjection_StackTrace: missing_feature test_hsts_missing_header.py: Test_HstsMissingHeader: missing_feature + Test_HstsMissingHeader_ExtendedLocation: missing_feature Test_HstsMissingHeader_StackTrace: missing_feature test_insecure_auth_protocol.py: Test_InsecureAuthProtocol: missing_feature + Test_InsecureAuthProtocol_ExtendedLocation: missing_feature Test_InsecureAuthProtocol_StackTrace: missing_feature test_insecure_cookie.py: TestInsecureCookie: missing_feature TestInsecureCookieNameFilter: missing_feature + TestInsecureCookie_ExtendedLocation: missing_feature TestInsecureCookie_StackTrace: missing_feature test_ldap_injection.py: TestLDAPInjection: missing_feature + TestLDAPInjection_ExtendedLocation: missing_feature TestLDAPInjection_StackTrace: missing_feature test_no_httponly_cookie.py: TestNoHttponlyCookie: missing_feature TestNoHttponlyCookieNameFilter: missing_feature + TestNoHttponlyCookie_ExtendedLocation: missing_feature TestNoHttponlyCookie_StackTrace: missing_feature test_no_samesite_cookie.py: TestNoSamesiteCookie: missing_feature TestNoSamesiteCookieNameFilter: missing_feature + TestNoSamesiteCookie_ExtendedLocation: missing_feature TestNoSamesiteCookie_StackTrace: missing_feature test_nosql_mongodb_injection.py: TestNoSqlMongodbInjection: missing_feature + TestNoSqlMongodbInjection_ExtendedLocation: missing_feature TestNoSqlMongodbInjection_StackTrace: missing_feature test_path_traversal.py: TestPathTraversal: missing_feature + TestPathTraversal_ExtendedLocation: missing_feature TestPathTraversal_StackTrace: 
missing_feature test_reflection_injection.py: TestReflectionInjection: missing_feature + TestReflectionInjection_ExtendedLocation: missing_feature TestReflectionInjection_StackTrace: missing_feature test_sql_injection.py: TestSqlInjection: missing_feature + TestSqlInjection_ExtendedLocation: missing_feature TestSqlInjection_StackTrace: missing_feature test_ssrf.py: TestSSRF: missing_feature + TestSSRF_ExtendedLocation: missing_feature TestSSRF_StackTrace: missing_feature test_template_injection.py: TestTemplateInjection: missing_feature + TestTemplateInjection_ExtendedLocation: missing_feature test_trust_boundary_violation.py: Test_TrustBoundaryViolation: missing_feature + Test_TrustBoundaryViolation_ExtendedLocation: missing_feature Test_TrustBoundaryViolation_StackTrace: missing_feature test_untrusted_deserialization.py: TestUntrustedDeserialization: missing_feature + TestUntrustedDeserialization_ExtendedLocation: missing_feature TestUntrustedDeserialization_StackTrace: missing_feature test_unvalidated_redirect.py: TestUnvalidatedHeader: missing_feature + TestUnvalidatedHeader_ExtendedLocation: missing_feature TestUnvalidatedHeader_StackTrace: missing_feature TestUnvalidatedRedirect: missing_feature + TestUnvalidatedRedirect_ExtendedLocation: missing_feature TestUnvalidatedRedirect_StackTrace: missing_feature test_unvalidated_redirect_forward.py: TestUnvalidatedForward: missing_feature + TestUnvalidatedForward_ExtendedLocation: missing_feature TestUnvalidatedForward_StackTrace: missing_feature test_weak_cipher.py: TestWeakCipher: missing_feature + TestWeakCipher_ExtendedLocation: missing_feature TestWeakCipher_StackTrace: missing_feature test_weak_hash.py: TestDeduplication: missing_feature TestWeakHash: missing_feature + TestWeakHash_ExtendedLocation: missing_feature TestWeakHash_StackTrace: missing_feature test_weak_randomness.py: TestWeakRandomness: missing_feature + TestWeakRandomness_ExtendedLocation: missing_feature TestWeakRandomness_StackTrace: missing_feature test_xcontent_sniffing.py: Test_XContentSniffing: missing_feature + Test_XContentSniffing_ExtendedLocation: missing_feature Test_XContentSniffing_StackTrace: missing_feature test_xpath_injection.py: TestXPathInjection: missing_feature + TestXPathInjection_ExtendedLocation: missing_feature TestXPathInjection_StackTrace: missing_feature test_xss.py: TestXSS: missing_feature + TestXSS_ExtendedLocation: missing_feature TestXSS_StackTrace: missing_feature source/: test_body.py: @@ -275,7 +304,7 @@ tests/: test_identify.py: Test_Basic: v1.0.0 test_ip_blocking_full_denylist.py: - Test_AppSecIPBlockingFullDenylist: missing_feature (Ruby supported denylists of 2500 entries but it fails to block this those 15000) + Test_AppSecIPBlockingFullDenylist: v2.9.0 test_logs.py: Test_Standardization: missing_feature Test_StandardizationBlockMode: missing_feature @@ -306,7 +335,7 @@ tests/: Test_ExternalWafRequestsIdentification: v1.22.0 Test_RetainTraces: v0.54.2 test_user_blocking_full_denylist.py: - Test_UserBlocking_FullDenylist: missing_feature (Ruby supported denylists of 2500 entries but it fails to block this those 15000) + Test_UserBlocking_FullDenylist: v2.9.0 test_versions.py: Test_Events: v0.54.2 debugger/: @@ -419,6 +448,7 @@ tests/: Test_Config_TraceEnabled: v2.0.0 Test_Config_TraceLogDirectory: missing_feature Test_Config_UnifiedServiceTagging: v2.5.0 + Test_Stable_Config_Default: missing_feature test_crashtracking.py: Test_Crashtracking: v2.3.0 test_dynamic_configuration.py: @@ -522,7 +552,10 @@ tests/: 
Test_Span_Links_Flags_From_Conflicting_Contexts: missing_feature (implementation specs have not been determined) Test_Span_Links_From_Conflicting_Contexts: missing_feature Test_Span_Links_Omit_Tracestate_From_Conflicting_Contexts: missing_feature (implementation specs have not been determined) - test_graphql.py: missing_feature + test_graphql.py: + Test_GraphQLQueryErrorReporting: + "*": irrelevant + graphql23: missing_feature test_identify.py: Test_Basic: v1.0.0 Test_Propagate: missing_feature @@ -554,6 +587,13 @@ tests/: Test_Meta: v1.7.0 Test_MetaDatadogTags: v1.9.0 Test_MetricsStandardTags: v1.7.0 + test_span_events.py: + Test_SpanEvents_WithAgentSupport: + "*": irrelevant + rack: v2.10.0 + Test_SpanEvents_WithoutAgentSupport: + "*": irrelevant + rack: v2.10.0 test_standard_tags.py: Test_StandardTagsClientIp: v1.10.1 Test_StandardTagsMethod: v1.8.0 diff --git a/pyproject.toml b/pyproject.toml index 42fb25ab88..5f73609c6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,20 +34,39 @@ markers =[ python_files = "test_*.py *utils.py" -# migration tooling: list of folders where feature declarations are mandatory -# once every test class got its feature declaration, we can remove this -allow_no_feature_nodes = [ - "tests/apm_tracing_e2e/test_single_span.py", - "tests/apm_tracing_e2e/test_smoke.py", - "tests/otel_tracing_e2e/test_e2e.py", - "tests/parametric/test_span_links.py", - "tests/parametric/test_tracer.py", - "tests/perfs/test_performances.py", # exotic scenario, not really used - "tests/test_the_test/", # Not a real test +[tool.mypy] +files = [ + "utils/", + "tests/", + "conftest.py", ] +exclude = [ + "utils/build/.*", # more permissive on weblog codes + "utils/grpc/weblog_pb2.py", # auto generated -[tool.mypy] -files = ["utils/parametric/**/*.py", "tests/parametric/**/*.py"] + # TODO + "tests/fuzzer/.*", + "tests/test_the_test/.*", + "tests/test_telemetry.py", + "tests/appsec/.*", + "tests/debugger/.*", + "tests/auto_inject/.*", + "tests/integrations/.*", + "tests/otel_tracing_e2e/test_e2e.py", + "tests/k8s_lib_injection/test_k8s_init_image_validator.py", + "utils/_context/_scenarios/auto_injection.py", + "utils/_context/_scenarios/k8s_lib_injection.py", + "utils/_context/core.py", + "utils/_context/virtual_machines.py", + "utils/scripts/decode-rc.py", + "utils/docker_ssi/.*", + "utils/k8s_lib_injection/.*", + "utils/onboarding/.*", + "utils/otel_validators/validator_trace.py", + "utils/proxy/_deserializer.py", + "utils/scripts/merge_gitlab_aws_pipelines.py", + "utils/virtual_machine/.*", +] ignore_missing_imports = true check_untyped_defs = true disable_error_code = ["no-redef"] @@ -143,6 +162,7 @@ ignore = [ "TD003", # todo found "TD004", # todo found "TRY300", # not always obvious + "UP015", # redundant-open-modes: yes it's redundant. 
But explicit is better than implicit
    "UP038", # not a big fan
]
@@ -160,7 +180,80 @@ ignore = [
]
"utils/waf_rules.py" = ["N801"] # generated file
# TODO : remove those ignores
-"tests/*" = ["ALL"]
+"tests/*" = [
+    # lines with [*] can be autofixed with ruff check --fix --unsafe-fixes
+    # though, each change must be reviewed
+    "ANN201", # 2043 occurrences [ ] missing-return-type-undocumented-public-function
+    "N801", # 492 occurrences [ ] invalid-class-name
+    "ARG002", # 177 occurrences [ ] unused-method-argument
+    "E501", # 159 occurrences [ ] line-too-long
+    "SIM117", # 127 occurrences [ ] multiple-with-statements
+    "TID252", # 77 occurrences [*] relative-imports
+    "N806", # 76 occurrences [ ] non-lowercase-variable-in-function
+    "FBT003", # 67 occurrences [ ] boolean-positional-value-in-call
+    "D200", # 52 occurrences [*] fits-on-one-line
+    "F405", # 43 occurrences [ ] undefined-local-with-import-star-usage
+    "TRY002", # 41 occurrences [ ] raise-vanilla-class
+    "PT018", # 39 occurrences [ ] pytest-composite-assertion
+    "N802", # 39 occurrences [ ] invalid-function-name
+    "FBT002", # 30 occurrences [ ] boolean-default-value-positional-argument
+    "D404", # 25 occurrences [ ] docstring-starts-with-this
+    "ANN401", # 24 occurrences [ ] any-type
+    "B007", # 23 occurrences [ ] unused-loop-control-variable
+    "DTZ005", # 23 occurrences [ ] call-datetime-now-without-tzinfo
+    "INP001", # 22 occurrences [ ] implicit-namespace-package
+    "ARG001", # 21 occurrences [ ] unused-function-argument
+    "UP031", # 17 occurrences [ ] printf-string-formatting
+    "ANN205", # 16 occurrences [ ] missing-return-type-static-method
+    "T201", # 15 occurrences [*] print
+    "FBT001", # 12 occurrences [ ] boolean-type-hint-positional-argument
+    "SLF001", # 12 occurrences [ ] private-member-access
+    "UP035", # 12 occurrences [ ] deprecated-import
+    "S105", # 10 occurrences [ ] hardcoded-password-string
+    "B015", # 10 occurrences [ ] useless-comparison
+    "RET503", # 9 occurrences [*] implicit-return
+    "SIM115", # 9 occurrences [ ] open-file-with-context-handler
+    "PGH004", # 9 occurrences [ ] blanket-noqa
+    "RUF001", # 9 occurrences [ ] ambiguous-unicode-character-string
+    "RUF015", # 9 occurrences [*] unnecessary-iterable-allocation-for-first-element
+    "SIM108", # 8 occurrences [*] if-else-block-instead-of-if-exp
+    "PTH120", # 8 occurrences [ ] os-path-dirname
+    "PGH003", # 8 occurrences [ ] blanket-type-ignore
+    "TRY301", # 8 occurrences [ ] raise-within-try
+    "ANN206", # 7 occurrences [ ] missing-return-type-class-method
+    "B011", # 7 occurrences [*] assert-false
+    "PT015", # 7 occurrences [ ] pytest-assert-always-false
+    "N815", # 7 occurrences [ ] mixed-case-variable-in-class-scope
+    "PT006", # 6 occurrences [*] pytest-parametrize-names-wrong-type
+    "N803", # 6 occurrences [ ] invalid-argument-name
+    "E741", # 6 occurrences [ ] ambiguous-variable-name
+    "S113", # 5 occurrences [ ] request-without-timeout
+    "PT011", # 5 occurrences [ ] pytest-raises-too-broad
+    "E731", # 5 occurrences [*] lambda-assignment
+    "RUF005", # 5 occurrences [ ] collection-literal-concatenation
+    "PTH103", # 4 occurrences [ ] os-makedirs
+    "PLW2901", # 4 occurrences [ ] redefined-loop-name
+    "PTH112", # 3 occurrences [ ] os-path-isdir
+    "ANN002", # 2 occurrences [ ] missing-type-args
+    "ASYNC230", # 2 occurrences [ ] blocking-open-call-in-async-function
+    "S605", # 2 occurrences [ ] start-process-with-a-shell
+    "PTH100", # 2 occurrences [ ] os-path-abspath
+    "PTH109", # 2 occurrences [ ] os-getcwd
+
+    # keep those exceptions
+    "C400", # unnecessary-generator-list: explicit list is more readable for non-python users
+    "C401", # unnecessary-generator-set: explicit set is more readable for non-python users
+    "C408", # unnecessary-collection-call: explicit tuple is more readable for non-python users
+    "C416", # unnecessary-comprehension: may make the code less readable for non-python users
+    "FIX003", # line-contains-xxx: freedom of speech!
+    "FIX004", # line-contains-xxx: freedom of speech!
+    "PLR1730", # if-stmt-min-max: not clear that it makes the code easier to read
+    "RET506", # superfluous-else-raise: requires a slightly higher cognitive effort to understand the code
+    "RET507", # superfluous-else-continue: requires a slightly higher cognitive effort to understand the code
+    "RET508", # superfluous-else-break: requires a slightly higher cognitive effort to understand the code
+    "RET505", # superfluous-else-return: requires a slightly higher cognitive effort to understand the code
+    "S108", # hardcoded-temp-file: test code may contain weird things
+]
"utils/build/*" = ["ALL"]
"lib-injection/*" = ["ALL"]
"utils/{k8s_lib_injection/*,_context/_scenarios/k8s_lib_injection.py}" = [
@@ -267,7 +360,6 @@ ignore = [
    "SIM300",
    "SIM401", # code quality, TBD
    "UP008",
-    "UP015",
    "UP031",
    "UP024",
diff --git a/requirements.txt b/requirements.txt
index d1bdd97e55..f91da378ee 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -33,6 +33,8 @@ rfc3339-validator==0.1.4
 ruff==0.8.1
 scp==0.14.5
 semantic-version==2.10.0
+types-protobuf==5.29.1.20241207
+types-PyYAML==6.0.12.20241230
 types-requests==2.31.0
 watchdog==3.0.0
 yarl==1.9.4
diff --git a/tests/apm_tracing_e2e/test_otel.py b/tests/apm_tracing_e2e/test_otel.py
index a176a6c825..86530e4480 100644
--- a/tests/apm_tracing_e2e/test_otel.py
+++ b/tests/apm_tracing_e2e/test_otel.py
@@ -24,7 +24,7 @@ def setup_datadog_otel_span(self):
     @flaky(library="golang", reason="APMAPI-178")
     def test_datadog_otel_span(self):
         spans = interfaces.agent.get_spans_list(self.req)
-        assert 2 <= len(spans), "Agent did not submit the spans we want!"
+        assert len(spans) >= 2, "Agent did not submit the spans we want!"
         # Assert the parent span sent by the agent.
         parent = _get_span_by_resource(spans, "root-otel-name.dd-resource")
@@ -45,7 +45,7 @@ def test_datadog_otel_span(self):
         # Assert the spans received from the backend!
         spans = interfaces.backend.assert_request_spans_exist(self.req, query_filter="", retries=10)
-        assert 2 == len(spans)
+        assert len(spans) == 2
     def setup_distributed_otel_trace(self):
         self.req = weblog.get(
@@ -56,7 +56,7 @@ def setup_distributed_otel_trace(self):
     @flaky(library="golang", reason="APMAPI-178")
     def test_distributed_otel_trace(self):
         spans = interfaces.agent.get_spans_list(self.req)
-        assert 3 <= len(spans), "Agent did not submit the spans we want!"
+        assert len(spans) >= 3, "Agent did not submit the spans we want!"
         # Assert the parent span sent by the agent.
         parent = _get_span_by_resource(spans, "root-otel-name.dd-resource")
@@ -77,7 +77,7 @@ def test_distributed_otel_trace(self):
         # Assert the spans received from the backend!
spans = interfaces.backend.assert_request_spans_exist(self.req, query_filter="", retries=10) - assert 3 == len(spans) + assert len(spans) == 3 def _get_span_by_name(spans, span_name): diff --git a/tests/apm_tracing_e2e/test_single_span.py b/tests/apm_tracing_e2e/test_single_span.py index 4bec73f18d..c19b616711 100644 --- a/tests/apm_tracing_e2e/test_single_span.py +++ b/tests/apm_tracing_e2e/test_single_span.py @@ -1,4 +1,4 @@ -from utils import context, weblog, interfaces, rfc, scenarios, missing_feature +from utils import context, weblog, interfaces, rfc, scenarios, missing_feature, features from utils.dd_constants import ( SAMPLING_PRIORITY_KEY, SINGLE_SPAN_SAMPLING_MECHANISM, @@ -11,6 +11,7 @@ @rfc("ATI-2419") @missing_feature(context.agent_version < "7.40", reason="Single Spans is not available in agents pre 7.40.") @scenarios.apm_tracing_e2e_single_span +@features.single_span_ingestion_control class Test_SingleSpan: """This is a test that exercises the Single Span Ingestion Control feature. Read more about Single Span at https://docs.datadoghq.com/tracing/trace_pipeline/ingestion_mechanisms/?tab=java#single-spans @@ -28,7 +29,7 @@ def setup_parent_span_is_single_span(self): def test_parent_span_is_single_span(self): # Only the parent span should be submitted to the backend! spans = interfaces.agent.get_spans_list(self.req) - assert 1 == len(spans), "Agent did not submit the spans we want!" + assert len(spans) == 1, "Agent did not submit the spans we want!" # Assert the spans sent by the agent. span = spans[0] @@ -39,7 +40,7 @@ def test_parent_span_is_single_span(self): # Assert the spans received from the backend! spans = interfaces.backend.assert_single_spans_exist(self.req) - assert 1 == len(spans) + assert len(spans) == 1 _assert_single_span_event(spans[0], "parent.span.single_span_submitted", is_root=True) def setup_child_span_is_single_span(self): @@ -51,7 +52,7 @@ def setup_child_span_is_single_span(self): def test_child_span_is_single_span(self): # Only the child should be submitted to the backend! spans = interfaces.agent.get_spans_list(self.req) - assert 1 == len(spans), "Agent did not submit the spans we want!" + assert len(spans) == 1, "Agent did not submit the spans we want!" # Assert the spans sent by the agent. span = spans[0] @@ -61,13 +62,13 @@ def test_child_span_is_single_span(self): # Assert the spans received from the backend! spans = interfaces.backend.assert_single_spans_exist(self.req) - assert 1 == len(spans) + assert len(spans) == 1 _assert_single_span_event(spans[0], "child.span.single_span_submitted", is_root=False) def _assert_single_span_event(event, name, is_root): assert event["operation_name"] == name - assert event["single_span"] == True + assert event["single_span"] is True assert event["ingestion_reason"] == "single_span" parent_id = event["parent_id"] if is_root: diff --git a/tests/apm_tracing_e2e/test_smoke.py b/tests/apm_tracing_e2e/test_smoke.py index cdd5d30de2..9d015706c2 100644 --- a/tests/apm_tracing_e2e/test_smoke.py +++ b/tests/apm_tracing_e2e/test_smoke.py @@ -1,8 +1,9 @@ -from utils import weblog, interfaces, rfc, scenarios +from utils import weblog, interfaces, rfc, scenarios, features @rfc("https://docs.google.com/document/d/1MtSlvPCKWM4x4amOYAvlKVbJjd0b0oUXxxlX-lo8KN8/edit#") @scenarios.apm_tracing_e2e +@features.not_reported # the scenario is not executed class Test_Backend: """This is a smoke test that exercises the full flow of APM Tracing. 
It includes trace submission, the trace flowing through the backend processing, @@ -13,4 +14,4 @@ def setup_main(self): self.r = weblog.get("/") def test_main(self): - trace = interfaces.backend.assert_library_traces_exist(self.r) + interfaces.backend.assert_library_traces_exist(self.r) diff --git a/tests/appsec/api_security/test_api_security_rc.py b/tests/appsec/api_security/test_api_security_rc.py index fc464a0c05..b594ae55b9 100644 --- a/tests/appsec/api_security/test_api_security_rc.py +++ b/tests/appsec/api_security/test_api_security_rc.py @@ -13,13 +13,13 @@ def get_schema(request, address): - """get api security schema from spans""" + """Get api security schema from spans""" for _, _, span in interfaces.library.get_spans(request): meta = span.get("meta", {}) payload = meta.get("_dd.appsec.s." + address) if payload is not None: return payload - return + return None @rfc("https://docs.google.com/document/d/1Ig5lna4l57-tJLMnC76noGFJaIHvudfYXdZYKz6gXUo/edit#heading=h.88xvn2cvs9dt") @@ -33,7 +33,7 @@ def setup_request_method(self): self.request = weblog.get("/tag_value/api_rc_processor/200?key=value") def test_request_method(self): - """can provide custom req.querytest schema""" + """Can provide custom req.querytest schema""" schema = get_schema(self.request, "req.querytest") assert self.request.status_code == 200 assert schema @@ -53,7 +53,7 @@ def setup_request_method(self): self.request = weblog.post("/tag_value/api_rc_scanner/200", data={"mail": "systemtestmail@datadoghq.com"}) def test_request_method(self): - """can provide custom req.querytest schema""" + """Can provide custom req.querytest schema""" schema = get_schema(self.request, "req.bodytest") EXPECTED_MAIL_SCHEMA = [8, {"category": "pii", "type": "email"}] @@ -90,7 +90,7 @@ def setup_request_method(self): self.request = weblog.post("/tag_value/api_rc_processor_overrides/200", data={"testcard": "1234567890"}) def test_request_method(self): - """can provide custom req.querytest schema""" + """Can provide custom req.querytest schema""" schema = get_schema(self.request, "req.bodytest") EXPECTED_TESTCARD_SCHEMA = [8, {"category": "testcategory", "type": "card"}] diff --git a/tests/appsec/api_security/test_apisec_sampling.py b/tests/appsec/api_security/test_apisec_sampling.py index d67637ed2f..0b005b0e1c 100644 --- a/tests/appsec/api_security/test_apisec_sampling.py +++ b/tests/appsec/api_security/test_apisec_sampling.py @@ -1,5 +1,4 @@ from utils import ( - bug, context, features, interfaces, @@ -16,13 +15,13 @@ def get_schema(request, address): - """get api security schema from spans""" + """Get api security schema from spans""" for _, _, span in interfaces.library.get_spans(request): meta = span.get("meta", {}) payload = meta.get("_dd.appsec.s." 
+ address) if payload is not None: return payload - return + return None @rfc("https://docs.google.com/document/d/1OCHPBCAErOL2FhLl64YAHB8woDyq66y5t-JGolxdf1Q/edit#heading=h.bth088vsbjrz") @@ -45,7 +44,7 @@ def setup_sampling_rate(self): context.library not in ("nodejs", "python"), reason="New sampling algorithm tests have been implemented" ) def test_sampling_rate(self): - """can provide request header schema""" + """Can provide request header schema""" N = self.N assert all(r.status_code == 200 for r in self.all_requests) s = sum(get_schema(r, "req.headers") is not None for r in self.all_requests) @@ -117,7 +116,7 @@ def setup_sampling_delay(self): self.all_requests = [weblog.get("/api_security/sampling/201") for _ in range(10)] def test_sampling_delay(self): - """can provide request header schema""" + """Can provide request header schema""" assert self.request1.status_code == 200 schema1 = get_schema(self.request1, "req.headers") @@ -146,7 +145,7 @@ def setup_sampling_delay(self): self.request3 = weblog.get("/api_security_sampling/30") def test_sampling_delay(self): - """can provide request header schema""" + """Can provide request header schema""" assert self.request1.status_code == 200 assert self.request2.status_code == 200 diff --git a/tests/appsec/api_security/test_schemas.py b/tests/appsec/api_security/test_schemas.py index 6aa79bc694..a8ee613c98 100644 --- a/tests/appsec/api_security/test_schemas.py +++ b/tests/appsec/api_security/test_schemas.py @@ -14,13 +14,13 @@ def get_schema(request, address): - """get api security schema from spans""" + """Get api security schema from spans""" for _, _, span in interfaces.library.get_spans(request): meta = span.get("meta", {}) payload = meta.get("_dd.appsec.s." + address) if payload is not None: return payload - return + return None # can be used to match any value in a schema @@ -28,7 +28,7 @@ def get_schema(request, address): def contains(t1, t2): - """validate that schema t1 contains all keys and values from t2""" + """Validate that schema t1 contains all keys and values from t2""" if t2 is ANY: return True if t1 is None or t2 is None: @@ -37,11 +37,11 @@ def contains(t1, t2): def equal_value(t1, t2): - """compare two schema type values, ignoring any metadata""" + """Compare two schema type values, ignoring any metadata""" if t2 is ANY: return True if isinstance(t1, list) and isinstance(t2, list): - return len(t1) == len(t2) and all(contains(a, b) for a, b in zip(t1, t2)) + return len(t1) == len(t2) and all(contains(a, b) for a, b in zip(t1, t2, strict=False)) if isinstance(t1, dict) and isinstance(t2, dict): return all(k in t1 and contains(t1[k], t2[k]) for k in t2) if isinstance(t1, int) and isinstance(t2, int): @@ -59,7 +59,7 @@ def setup_request_method(self): self.request = weblog.get("/tag_value/api_match_AS001/200") def test_request_method(self): - """can provide request header schema""" + """Can provide request header schema""" schema = get_schema(self.request, "req.headers") assert self.request.status_code == 200 assert schema @@ -82,7 +82,7 @@ def setup_request_method(self): @missing_feature(context.library < "python@1.19.0.dev") def test_request_method(self): - """can provide request header schema""" + """Can provide request header schema""" schema = get_schema(self.request, "req.cookies") assert self.request.status_code == 200 assert schema @@ -102,7 +102,7 @@ def setup_request_method(self): self.request = weblog.get("/tag_value/api_match_AS002/200?x=123&y=abc&z=%7B%22key%22%3A%22value%22%7D") def test_request_method(self): - 
"""can provide request query parameters schema""" + """Can provide request query parameters schema""" schema = get_schema(self.request, "req.query") assert self.request.status_code == 200 assert schema @@ -122,7 +122,7 @@ def setup_request_method(self): self.request = weblog.get("/tag_value/api_match_AS003/200") def test_request_method(self): - """can provide request path parameters schema""" + """Can provide request path parameters schema""" schema = get_schema(self.request, "req.params") assert self.request.status_code == 200 assert schema @@ -147,7 +147,7 @@ def setup_request_method(self): self.request = weblog.post("/tag_value/api_match_AS004/200", json=payload) def test_request_method(self): - """can provide request request body schema""" + """Can provide request request body schema""" schema = get_schema(self.request, "req.body") assert self.request.status_code == 200 assert contains(schema, [{"main": [[[{"key": [8], "value": [16]}]], {"len": 2}], "nullable": [1]}]) @@ -172,7 +172,7 @@ def setup_request_method(self): ) def test_request_method(self): - """can provide request request body schema""" + """Can provide request request body schema""" schema = get_schema(self.request, "req.body") assert self.request.status_code == 200 assert ( @@ -203,7 +203,7 @@ def setup_request_method(self): self.request = weblog.get("/tag_value/api_match_AS005/200?X-option=test_value") def test_request_method(self): - """can provide response header schema""" + """Can provide response header schema""" schema = get_schema(self.request, "res.headers") assert self.request.status_code == 200 assert isinstance(schema, list) @@ -225,7 +225,7 @@ def setup_request_method(self): ) def test_request_method(self): - """can provide response body schema""" + """Can provide response body schema""" assert self.request.status_code == 200 schema = get_schema(self.request, "res.body") @@ -242,8 +242,7 @@ def test_request_method(self): @scenarios.appsec_api_security_no_response_body @features.api_security_schemas class Test_Schema_Response_Body_env_var: - """ - Test API Security - Response Body Schema with urlencoded body and env var disabling response body parsing + """Test API Security - Response Body Schema with urlencoded body and env var disabling response body parsing Check that response headers are still parsed but not response body """ @@ -254,7 +253,7 @@ def setup_request_method(self): ) def test_request_method(self): - """can provide response body schema""" + """Can provide response body schema""" assert self.request.status_code == 200 headers_schema = get_schema(self.request, "res.headers") @@ -282,7 +281,7 @@ def setup_request_method(self): @missing_feature(context.library < "python@1.19.0.dev") def test_request_method(self): - """can provide request header schema""" + """Can provide request header schema""" schema_cookies = get_schema(self.request, "req.cookies") schema_headers = get_schema(self.request, "req.headers") assert self.request.status_code == 200 diff --git a/tests/appsec/iast/sink/test_code_injection.py b/tests/appsec/iast/sink/test_code_injection.py index 2a9e0ee534..f92ce45b38 100644 --- a/tests/appsec/iast/sink/test_code_injection.py +++ b/tests/appsec/iast/sink/test_code_injection.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. 
from utils import context, missing_feature, features, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_stack_traces, validate_extended_location_data @features.iast_sink_code_injection @@ -19,7 +19,7 @@ class TestCodeInjection(BaseSinkTest): "nodejs": {"express4": "iast/index.js", "express4-typescript": "iast.ts", "express5": "iast/index.js"}, } - @missing_feature(library="nodejs", reason="Instrumented metric not implemented") + @missing_feature(context.library < "nodejs@5.34.0") def test_telemetry_metric_instrumented_sink(self): super().test_telemetry_metric_instrumented_sink() @@ -36,3 +36,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestCodeInjection_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "CODE_INJECTION" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/code_injection/test_insecure", data={"code": "1+2"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_command_injection.py b/tests/appsec/iast/sink/test_command_injection.py index 317367cb28..3c124fb758 100644 --- a/tests/appsec/iast/sink/test_command_injection.py +++ b/tests/appsec/iast/sink/test_command_injection.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. from utils import context, missing_feature, features, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_command_injection @@ -48,3 +48,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestCommandInjection_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "COMMAND_INJECTION" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/cmdi/test_insecure", data={"cmd": "ls"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_email_html_injection.py b/tests/appsec/iast/sink/test_email_html_injection.py index 740ecc7ec3..752be77662 100644 --- a/tests/appsec/iast/sink/test_email_html_injection.py +++ b/tests/appsec/iast/sink/test_email_html_injection.py @@ -2,8 +2,8 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. 
-from utils import context, missing_feature, features, weblog, rfc
-from ..utils import BaseSinkTest, validate_stack_traces
+from utils import missing_feature, features, weblog, rfc
+from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces
 
 
 @features.iast_sink_email_html_injection
@@ -36,3 +36,19 @@ def setup_stack_trace(self):
 
     def test_stack_trace(self):
         validate_stack_traces(self.r)
+
+
+@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4")
+@features.iast_extended_location
+class TestEmailHtmlInjection_ExtendedLocation:
+    """Test extended location data"""
+
+    vulnerability_type = "EMAIL_HTML_INJECTION"
+
+    def setup_extended_location_data(self):
+        self.r = weblog.post(
+            "/iast/email_html_injection/test_insecure", data={"username": "Josh", "email": "fakeemail@localhost"}
+        )
+
+    def test_extended_location_data(self):
+        validate_extended_location_data(self.r, self.vulnerability_type)
diff --git a/tests/appsec/iast/sink/test_hardcoded_passwords.py b/tests/appsec/iast/sink/test_hardcoded_passwords.py
index 6f6c58359d..bb76545fa4 100644
--- a/tests/appsec/iast/sink/test_hardcoded_passwords.py
+++ b/tests/appsec/iast/sink/test_hardcoded_passwords.py
@@ -2,8 +2,8 @@
 # This product includes software developed at Datadog (https://www.datadoghq.com/).
 # Copyright 2021 Datadog, Inc.
 
-from utils import interfaces, weblog, features, context, rfc
-from ..utils import validate_stack_traces
+from utils import weblog, features, context, rfc
+from ..utils import get_hardcoded_vulnerabilities, validate_stack_traces
 
 # Test_HardcodedPasswords doesn't inherit from BaseSinkTest
 # Hardcoded password detection implementations change a lot between languages
@@ -32,26 +32,12 @@ def setup_hardcoded_passwords_exec(self):
 
     def test_hardcoded_passwords_exec(self):
         assert self.r_hardcoded_passwords_exec.status_code == 200
-        hardcoded_passwords = self.get_hardcoded_password_vulnerabilities()
+        hardcoded_passwords = get_hardcoded_vulnerabilities("HARDCODED_PASSWORD")
         hardcoded_passwords = [v for v in hardcoded_passwords if v["evidence"]["value"] == "hashpwd"]
         assert len(hardcoded_passwords) == 1
         vuln = hardcoded_passwords[0]
         assert vuln["location"]["path"] == self._get_expectation(self.location_map)
 
-    def get_hardcoded_password_vulnerabilities(self):
-        spans = [s for _, s in interfaces.library.get_root_spans()]
-        assert spans, "No spans found"
-        spans_meta = [span.get("meta") for span in spans]
-        assert spans_meta, "No spans meta found"
-        iast_events = [meta.get("_dd.iast.json") for meta in spans_meta if meta.get("_dd.iast.json")]
-        assert iast_events, "No iast events found"
-        vulnerabilities = [event.get("vulnerabilities") for event in iast_events if event.get("vulnerabilities")]
-        assert vulnerabilities, "No vulnerabilities found"
-        vulnerabilities = sum(vulnerabilities, [])  # set all the vulnerabilities in a single list
-        hardcoded_passwords = [vuln for vuln in vulnerabilities if vuln.get("type") == "HARDCODED_PASSWORD"]
-        assert hardcoded_passwords, "No hardcoded passwords found"
-        return hardcoded_passwords
-
     def _get_expectation(self, d):
         expected = d.get(context.library.library)
         if isinstance(expected, dict):
@@ -71,3 +57,23 @@ def setup_stack_trace(self):
 
     def test_stack_trace(self):
         validate_stack_traces(self.r)
+
+
+@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4")
+@features.iast_extended_location
+class Test_HardcodedPasswords_ExtendedLocation:
+    """Test extended location data"""
+
+    def setup_extended_location_data(self):
+        self.r = weblog.get("/iast/hardcoded_passwords/test_insecure")
+
+    def test_extended_location_data(self):
+        hardcoded_passwords = get_hardcoded_vulnerabilities("HARDCODED_PASSWORD")
+        hardcoded_passwords = [v for v in hardcoded_passwords if v["evidence"]["value"] == "hashpwd"]
+        assert len(hardcoded_passwords) == 1
+        location = hardcoded_passwords[0]["location"]
+
+        assert all(field in location for field in ["path", "line"])
+
+        if context.library.library not in ("python", "nodejs"):
+            assert all(field in location for field in ["class", "method"])
diff --git a/tests/appsec/iast/sink/test_hardcoded_secrets.py b/tests/appsec/iast/sink/test_hardcoded_secrets.py
index 28eda35e9f..748c22056c 100644
--- a/tests/appsec/iast/sink/test_hardcoded_secrets.py
+++ b/tests/appsec/iast/sink/test_hardcoded_secrets.py
@@ -2,8 +2,8 @@
 # This product includes software developed at Datadog (https://www.datadoghq.com/).
 # Copyright 2021 Datadog, Inc.
 
-from utils import interfaces, weblog, features, context, rfc, weblog
-from ..utils import validate_stack_traces
+from utils import features, context, rfc, weblog
+from ..utils import get_hardcoded_vulnerabilities, validate_stack_traces
 
 # Test_HardcodedSecrets and Test_HardcodedSecretsExtended don't inherit from BaseSinkTest
 # Hardcoded secret detection implementations change a lot between languages,
 # as the vulnerability is not always set in the current request span.
@@ -11,21 +11,6 @@
-def get_hardcoded_secret_vulnerabilities():
-    spans = [s for _, s in interfaces.library.get_root_spans()]
-    assert spans, "No spans found"
-    spans_meta = [span.get("meta") for span in spans]
-    assert spans_meta, "No spans meta found"
-    iast_events = [meta.get("_dd.iast.json") for meta in spans_meta if meta.get("_dd.iast.json")]
-    assert iast_events, "No iast events found"
-    vulnerabilities = [event.get("vulnerabilities") for event in iast_events if event.get("vulnerabilities")]
-    assert vulnerabilities, "No vulnerabilities found"
-    vulnerabilities = sum(vulnerabilities, [])  # set all the vulnerabilities in a single list
-    hardcoded_secrets = [vuln for vuln in vulnerabilities if vuln.get("type") == "HARDCODED_SECRET"]
-    assert hardcoded_secrets, "No hardcoded secrets found"
-    return hardcoded_secrets
-
-
 def get_expectation(d):
     expected = d.get(context.library.library)
     if isinstance(expected, dict):
@@ -55,7 +40,7 @@ def setup_hardcoded_secrets_exec(self):
 
     def test_hardcoded_secrets_exec(self):
         assert self.r_hardcoded_secrets_exec.status_code == 200
-        hardcode_secrets = get_hardcoded_secret_vulnerabilities()
+        hardcode_secrets = get_hardcoded_vulnerabilities("HARDCODED_SECRET")
         hardcode_secrets = [v for v in hardcode_secrets if v["evidence"]["value"] == "aws-access-token"]
         assert len(hardcode_secrets) == 1
         vuln = hardcode_secrets[0]
@@ -80,7 +65,7 @@ def setup_hardcoded_secrets_extended_exec(self):
 
     def test_hardcoded_secrets_extended_exec(self):
         assert self.r_hardcoded_secrets_exec.status_code == 200
-        hardcoded_secrets = get_hardcoded_secret_vulnerabilities()
+        hardcoded_secrets = get_hardcoded_vulnerabilities("HARDCODED_SECRET")
         hardcoded_secrets = [v for v in hardcoded_secrets if v["evidence"]["value"] == "datadog-access-token"]
         assert len(hardcoded_secrets) == 1
         vuln = hardcoded_secrets[0]
@@ -99,3 +84,23 @@ def setup_stack_trace(self):
 
     def test_stack_trace(self):
         validate_stack_traces(self.r)
+
+
+@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4")
+@features.iast_extended_location
+class Test_HardcodedSecrets_ExtendedLocation:
+    """Test extended location data"""
+
+    def setup_extended_location_data(self):
+        self.r = weblog.get("/iast/hardcoded_secrets/test_insecure")
+
+    def test_extended_location_data(self):
+        hardcode_secrets = get_hardcoded_vulnerabilities("HARDCODED_SECRET")
+        hardcode_secrets = [v for v in hardcode_secrets if v["evidence"]["value"] == "aws-access-token"]
+        assert len(hardcode_secrets) == 1
+        location = hardcode_secrets[0]["location"]
+
+        assert all(field in location for field in ["path", "line"])
+
+        if context.library.library not in ("python", "nodejs"):
+            assert all(field in location for field in ["class", "method"])
diff --git a/tests/appsec/iast/sink/test_header_injection.py b/tests/appsec/iast/sink/test_header_injection.py
index 0804d74fd1..14550ab6de 100644
--- a/tests/appsec/iast/sink/test_header_injection.py
+++ b/tests/appsec/iast/sink/test_header_injection.py
@@ -3,7 +3,7 @@
 # Copyright 2021 Datadog, Inc.
 
 from utils import context, features, missing_feature, rfc, weblog
-from ..utils import BaseSinkTest, validate_stack_traces, assert_iast_vulnerability
+from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces, assert_iast_vulnerability
 
 
 class _BaseTestHeaderInjectionReflectedExclusion:
@@ -117,3 +117,17 @@ class TestHeaderInjectionExclusionTransferEncoding(_BaseTestHeaderInjectionRefle
     origin_header = "accept-encoding"
     reflected_header = "transfer-encoding"
     headers = {"accept-encoding": "foo, bar"}
+
+
+@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4")
+@features.iast_extended_location
+class TestHeaderInjection_ExtendedLocation:
+    """Test extended location data"""
+
+    vulnerability_type = "HEADER_INJECTION"
+
+    def setup_extended_location_data(self):
+        self.r = weblog.post("/iast/header_injection/test_insecure", data={"test": "dummyvalue"})
+
+    def test_extended_location_data(self):
+        validate_extended_location_data(self.r, self.vulnerability_type)
diff --git a/tests/appsec/iast/sink/test_hsts_missing_header.py b/tests/appsec/iast/sink/test_hsts_missing_header.py
index 1b372e52f5..910e34a8cb 100644
--- a/tests/appsec/iast/sink/test_hsts_missing_header.py
+++ b/tests/appsec/iast/sink/test_hsts_missing_header.py
@@ -3,7 +3,7 @@
 # Copyright 2021 Datadog, Inc.
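For reference, the vulnerability entries these extended-location tests consume come from the root span's _dd.iast.json meta. Below is an illustrative sketch of one entry: the field names (path/line, plus the new class/method pair) are the ones asserted above, while the values are hypothetical.

    # Illustrative sketch only: field names from the assertions above, values hypothetical
    vulnerability = {
        "type": "HARDCODED_SECRET",
        "evidence": {"value": "aws-access-token"},
        "location": {
            "path": "src/main/java/com/example/AppConfig.java",  # hypothetical path
            "line": 42,
            "class": "com.example.AppConfig",  # extended location fields
            "method": "loadCredentials",
        },
    }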
from utils import context, missing_feature, features, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_hsts_missing_header @@ -39,3 +39,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class Test_HstsMissingHeader_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "HSTS_HEADER_MISSING" + + def setup_extended_location_data(self): + self.r = weblog.get("/iast/hstsmissing/test_insecure", headers={"X-Forwarded-Proto": "https"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type, False) diff --git a/tests/appsec/iast/sink/test_insecure_auth_protocol.py b/tests/appsec/iast/sink/test_insecure_auth_protocol.py index 754e682b19..a158be7012 100644 --- a/tests/appsec/iast/sink/test_insecure_auth_protocol.py +++ b/tests/appsec/iast/sink/test_insecure_auth_protocol.py @@ -2,8 +2,8 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. -from utils import context, missing_feature, features, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from utils import missing_feature, features, rfc, weblog +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_insecure_auth_protocol @@ -47,3 +47,22 @@ def setup_stack_trace(self): @missing_feature(library="java", reason="Not implemented yet") def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class Test_InsecureAuthProtocol_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "INSECURE_AUTH_PROTOCOL" + + def setup_extended_location_data(self): + self.r = weblog.get( + "/iast/insecure-auth-protocol/test_insecure", + headers={ + "Authorization": 'Digest username="WATERFORD", realm="Users", nonce="c5rcvu346qavqf3hnmsrnqj5up", uri="/api/partner/validate", response="57c8d9f11ec7a2f1ab13c5e166b2c505"' + }, + ) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_insecure_cookie.py b/tests/appsec/iast/sink/test_insecure_cookie.py index 09f750aff7..722d08ba10 100644 --- a/tests/appsec/iast/sink/test_insecure_cookie.py +++ b/tests/appsec/iast/sink/test_insecure_cookie.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. 
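Note the third argument in the HSTS test above: vulnerabilities that are not tied to a specific line of application code (presumably because a missing response header has no single culpable source line) pass False so the helper only checks that the vulnerability is reported and skips the location assertions. A sketch of the two call shapes, with endpoints and types taken from the tests above:

    # Full location validation (default third argument)
    validate_extended_location_data(self.r, "INSECURE_AUTH_PROTOCOL")
    # Presence-only validation: location checks are skipped
    validate_extended_location_data(self.r, "HSTS_HEADER_MISSING", False)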
from utils import context, missing_feature, bug, weblog, features, rfc, scenarios, flaky -from ..utils import BaseSinkTest, BaseTestCookieNameFilter, validate_stack_traces +from ..utils import BaseSinkTest, BaseTestCookieNameFilter, validate_extended_location_data, validate_stack_traces @features.iast_sink_insecure_cookie @@ -62,3 +62,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestInsecureCookie_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "INSECURE_COOKIE" + + def setup_extended_location_data(self): + self.r = weblog.get("/iast/insecure-cookie/test_insecure") + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_ldap_injection.py b/tests/appsec/iast/sink/test_ldap_injection.py index 527fac132b..6124fee4ba 100644 --- a/tests/appsec/iast/sink/test_ldap_injection.py +++ b/tests/appsec/iast/sink/test_ldap_injection.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. from utils import context, missing_feature, features, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_ldap_injection @@ -42,3 +42,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestLDAPInjection_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "LDAP_INJECTION" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/ldapi/test_insecure", data={"username": "ssam", "password": "sammy"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_no_httponly_cookie.py b/tests/appsec/iast/sink/test_no_httponly_cookie.py index 819c921e37..5d759513f6 100644 --- a/tests/appsec/iast/sink/test_no_httponly_cookie.py +++ b/tests/appsec/iast/sink/test_no_httponly_cookie.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. 
from utils import context, missing_feature, bug, weblog, features, rfc, scenarios, flaky -from ..utils import BaseSinkTest, BaseTestCookieNameFilter, validate_stack_traces +from ..utils import BaseSinkTest, BaseTestCookieNameFilter, validate_extended_location_data, validate_stack_traces @features.iast_sink_http_only_cookie @@ -62,3 +62,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestNoHttponlyCookie_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "NO_HTTPONLY_COOKIE" + + def setup_extended_location_data(self): + self.r = weblog.get("/iast/no-httponly-cookie/test_insecure") + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_no_samesite_cookie.py b/tests/appsec/iast/sink/test_no_samesite_cookie.py index 28ccd6387c..4f7cd104af 100644 --- a/tests/appsec/iast/sink/test_no_samesite_cookie.py +++ b/tests/appsec/iast/sink/test_no_samesite_cookie.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. from utils import context, missing_feature, bug, weblog, features, rfc, scenarios, flaky -from ..utils import BaseSinkTest, BaseTestCookieNameFilter, validate_stack_traces +from ..utils import BaseSinkTest, BaseTestCookieNameFilter, validate_extended_location_data, validate_stack_traces @features.iast_sink_samesite_cookie @@ -62,3 +62,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestNoSamesiteCookie_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "NO_SAMESITE_COOKIE" + + def setup_extended_location_data(self): + self.r = weblog.get("/iast/no-samesite-cookie/test_insecure") + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_nosql_mongodb_injection.py b/tests/appsec/iast/sink/test_nosql_mongodb_injection.py index 419df3636e..e394140283 100644 --- a/tests/appsec/iast/sink/test_nosql_mongodb_injection.py +++ b/tests/appsec/iast/sink/test_nosql_mongodb_injection.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. 
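Each of these *_ExtendedLocation classes follows the usual system-tests pattern: the setup_<name> method fires the weblog request while traffic is generated, and the matching test_<name> method later asserts on the data the interfaces captured. A minimal sketch of the pattern; the class name, vulnerability type, and endpoint here are hypothetical:

    @features.iast_extended_location
    class Test_Example_ExtendedLocation:
        """Test extended location data"""

        vulnerability_type = "EXAMPLE_VULNERABILITY"  # hypothetical type

        def setup_extended_location_data(self):
            # runs during traffic generation: trigger the vulnerable endpoint
            self.r = weblog.get("/iast/example/test_insecure")  # hypothetical endpoint

        def test_extended_location_data(self):
            # runs after collection: validate the reported location data
            validate_extended_location_data(self.r, self.vulnerability_type)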
from utils import context, missing_feature, scenarios, features, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @scenarios.integrations @@ -51,3 +51,18 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@scenarios.integrations +@features.iast_extended_location +class TestNoSqlMongodbInjection_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "NOSQL_MONGODB_INJECTION" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/mongodb-nosql-injection/test_insecure", data={"key": "somevalue"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_path_traversal.py b/tests/appsec/iast/sink/test_path_traversal.py index fd66fb8217..a42fd92633 100644 --- a/tests/appsec/iast/sink/test_path_traversal.py +++ b/tests/appsec/iast/sink/test_path_traversal.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. from utils import context, missing_feature, features, weblog, rfc -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_path_traversal @@ -48,3 +48,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestPathTraversal_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "PATH_TRAVERSAL" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/path_traversal/test_insecure", data={"path": "/var/log"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_reflection_injection.py b/tests/appsec/iast/sink/test_reflection_injection.py index 4c32f4a2b9..a109309165 100644 --- a/tests/appsec/iast/sink/test_reflection_injection.py +++ b/tests/appsec/iast/sink/test_reflection_injection.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. from utils import missing_feature, features, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_reflection_injection @@ -40,3 +40,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestReflectionInjection_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "REFLECTION_INJECTION" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/reflection_injection/test_insecure", data={"param": "ReflectionInjection"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_sql_injection.py b/tests/appsec/iast/sink/test_sql_injection.py index 1747838537..ffd01f314d 100644 --- a/tests/appsec/iast/sink/test_sql_injection.py +++ b/tests/appsec/iast/sink/test_sql_injection.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. 
from utils import context, missing_feature, features, bug, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_sql_injection @@ -26,7 +26,9 @@ def test_insecure(self): super().test_insecure() @missing_feature(context.library < "java@1.9.0", reason="Metrics not implemented") - @missing_feature(library="python", reason="Not implemented yet") + @missing_feature( + context.weblog_variant in ("fastapi", "flask-poc", "uwsgi-poc", "uds-flask"), reason="Not implemented yet" + ) @missing_feature(library="dotnet", reason="Not implemented yet") def test_telemetry_metric_instrumented_sink(self): super().test_telemetry_metric_instrumented_sink() @@ -35,7 +37,6 @@ def test_telemetry_metric_instrumented_sink(self): def test_telemetry_metric_executed_sink(self): super().test_telemetry_metric_executed_sink() - @missing_feature(library="python", reason="Endpoint responds 500") @missing_feature(context.weblog_variant == "jersey-grizzly2", reason="Endpoint responds 500") def test_secure(self): super().test_secure() @@ -53,3 +54,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestSqlInjection_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "SQL_INJECTION" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/sqli/test_insecure", data={"username": "shaquille_oatmeal", "password": "123456"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_ssrf.py b/tests/appsec/iast/sink/test_ssrf.py index 75ec87dafd..c8088bf92c 100644 --- a/tests/appsec/iast/sink/test_ssrf.py +++ b/tests/appsec/iast/sink/test_ssrf.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. 
from utils import bug, context, missing_feature, features, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_ssrf @@ -18,7 +18,7 @@ class TestSSRF(BaseSinkTest): location_map = { "java": "com.datadoghq.system_tests.iast.utils.SsrfExamples", "nodejs": {"express4": "iast/index.js", "express4-typescript": "iast.ts", "express5": "iast/index.js"}, - "python": {"flask-poc": "app.py", "django-poc": "app/urls.py"}, + "python": {"flask-poc": "app.py", "django-poc": "app/urls.py", "fastapi": "main.py"}, } @bug(context.library < "java@1.14.0", reason="APMRP-360") @@ -26,7 +26,6 @@ def test_insecure(self): super().test_insecure() @missing_feature(library="nodejs", reason="Endpoint not implemented") - @missing_feature(library="python", reason="Endpoint responds 403") @missing_feature(library="java", reason="Endpoint not implemented") def test_secure(self): super().test_secure() @@ -48,3 +47,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestSSRF_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "SSRF" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/ssrf/test_insecure", data={"url": "https://www.datadoghq.com"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_template_injection.py b/tests/appsec/iast/sink/test_template_injection.py index b54dea2d1d..2062f5e47e 100644 --- a/tests/appsec/iast/sink/test_template_injection.py +++ b/tests/appsec/iast/sink/test_template_injection.py @@ -2,8 +2,8 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. -from utils import features -from ..utils import BaseSinkTest +from utils import features, weblog, rfc +from ..utils import BaseSinkTest, validate_extended_location_data @features.iast_sink_template_injection @@ -16,3 +16,17 @@ class TestTemplateInjection(BaseSinkTest): secure_endpoint = "/iast/template_injection/test_secure" data = {"template": "Hello"} + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestTemplateInjection_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "TEMPLATE_INJECTION" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/template_injection/test_insecure", data={"template": "Hello"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_trust_boundary_violation.py b/tests/appsec/iast/sink/test_trust_boundary_violation.py index ede2601f65..b58543b738 100644 --- a/tests/appsec/iast/sink/test_trust_boundary_violation.py +++ b/tests/appsec/iast/sink/test_trust_boundary_violation.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. 
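The location_map above (note the new "fastapi": "main.py" entry for Python) encodes the expected location per library, and per weblog variant when the value is itself a dict. A sketch of the lookup these tests perform, mirroring get_expectation from the hardcoded-secrets file earlier in this patch:

    # Resolve the expected location for the current library/variant
    def resolve_expected_location(location_map):
        expected = location_map.get(context.library.library)
        if isinstance(expected, dict):
            # per-weblog-variant expectation, e.g. express4 vs express5
            expected = expected.get(context.weblog_variant)
        return expected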
from utils import context, missing_feature, features, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_trustboundaryviolation @@ -43,3 +43,20 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class Test_TrustBoundaryViolation_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "TRUST_BOUNDARY_VIOLATION" + + def setup_extended_location_data(self): + self.r = weblog.get( + "/iast/trust-boundary-violation/test_insecure", + params={"username": "shaquille_oatmeal", "password": "123456"}, + ) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type, False) diff --git a/tests/appsec/iast/sink/test_untrusted_deserialization.py b/tests/appsec/iast/sink/test_untrusted_deserialization.py index ed6919a66a..cd168fae32 100644 --- a/tests/appsec/iast/sink/test_untrusted_deserialization.py +++ b/tests/appsec/iast/sink/test_untrusted_deserialization.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. from utils import features, weblog, rfc -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_untrusted_deserialization @@ -28,7 +28,21 @@ class TestUntrustedDeserialization_StackTrace: """Validate stack trace generation""" def setup_stack_trace(self): - self.r = weblog.get("/iast/untrusted_deserialization/test_insecure") + self.r = weblog.get("/iast/untrusted_deserialization/test_insecure?name=example") def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestUntrustedDeserialization_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "UNTRUSTED_DESERIALIZATION" + + def setup_extended_location_data(self): + self.r = weblog.get("/iast/untrusted_deserialization/test_insecure?name=example") + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_unvalidated_redirect.py b/tests/appsec/iast/sink/test_unvalidated_redirect.py index 6caff55564..d758ffde4d 100644 --- a/tests/appsec/iast/sink/test_unvalidated_redirect.py +++ b/tests/appsec/iast/sink/test_unvalidated_redirect.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. 
from utils import context, irrelevant, features, missing_feature, rfc, weblog -from ..utils import BaseSinkTestWithoutTelemetry, validate_stack_traces +from ..utils import BaseSinkTestWithoutTelemetry, validate_extended_location_data, validate_stack_traces def _expected_location(): @@ -99,3 +99,35 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestUnvalidatedRedirect_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "UNVALIDATED_REDIRECT" + + def setup_extended_location_data(self): + self.r = weblog.post( + "/iast/unvalidated_redirect/test_insecure_redirect", data={"location": "http://dummy.location.com"} + ) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestUnvalidatedHeader_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "UNVALIDATED_REDIRECT" + + def setup_extended_location_data(self): + self.r = weblog.post( + "/iast/unvalidated_redirect/test_insecure_header", data={"location": "http://dummy.location.com"} + ) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_unvalidated_redirect_forward.py b/tests/appsec/iast/sink/test_unvalidated_redirect_forward.py index ca85e9cf69..2950a1e340 100644 --- a/tests/appsec/iast/sink/test_unvalidated_redirect_forward.py +++ b/tests/appsec/iast/sink/test_unvalidated_redirect_forward.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. from utils import context, features, missing_feature, rfc, weblog -from ..utils import BaseSinkTestWithoutTelemetry, validate_stack_traces +from ..utils import BaseSinkTestWithoutTelemetry, validate_extended_location_data, validate_stack_traces def _expected_location(): @@ -46,3 +46,19 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestUnvalidatedForward_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "UNVALIDATED_REDIRECT" + + def setup_extended_location_data(self): + self.r = weblog.post( + "/iast/unvalidated_redirect/test_insecure_forward", data={"location": "http://dummy.location.com"} + ) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_weak_cipher.py b/tests/appsec/iast/sink/test_weak_cipher.py index b2604f18ab..c65fa96259 100644 --- a/tests/appsec/iast/sink/test_weak_cipher.py +++ b/tests/appsec/iast/sink/test_weak_cipher.py @@ -2,7 +2,7 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. 
from utils import context, missing_feature, flaky, features, weblog, rfc -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.weak_cipher_detection @@ -46,3 +46,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestWeakCipher_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "WEAK_CIPHER" + + def setup_extended_location_data(self): + self.r = weblog.get("/iast/insecure_cipher/test_insecure_algorithm") + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_weak_hash.py b/tests/appsec/iast/sink/test_weak_hash.py index ecc322da20..2c9c30ae73 100644 --- a/tests/appsec/iast/sink/test_weak_hash.py +++ b/tests/appsec/iast/sink/test_weak_hash.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. from utils import weblog, context, missing_feature, features, rfc, scenarios -from ..utils import BaseSinkTest, assert_iast_vulnerability, validate_stack_traces +from ..utils import BaseSinkTest, assert_iast_vulnerability, validate_extended_location_data, validate_stack_traces def _expected_location(): @@ -19,12 +19,11 @@ def _expected_location(): if context.library.library == "python": if context.library.version >= "1.12.0": return "iast.py" + # old value: absolute path + elif context.weblog_variant == "uwsgi-poc": + return "/app/./iast.py" else: - # old value: absolute path - if context.weblog_variant == "uwsgi-poc": - return "/app/./iast.py" - else: - return "/app/iast.py" + return "/app/iast.py" def _expected_evidence(): @@ -84,7 +83,8 @@ def setup_insecure_hash_remove_duplicates(self): def test_insecure_hash_remove_duplicates(self): """If one line is vulnerable and it is executed multiple times (for instance in a loop) in a request, - we will report only one vulnerability""" + we will report only one vulnerability + """ assert_iast_vulnerability( request=self.r_insecure_hash_remove_duplicates, vulnerability_count=1, @@ -104,3 +104,17 @@ def test_insecure_hash_multiple(self): vulnerability_type="WEAK_HASH", expected_location=_expected_location(), ) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestWeakHash_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "WEAK_HASH" + + def setup_extended_location_data(self): + self.r = weblog.get("/iast/insecure_hashing/test_md5_algorithm") + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_weak_randomness.py b/tests/appsec/iast/sink/test_weak_randomness.py index 893441b408..f6de93ac95 100644 --- a/tests/appsec/iast/sink/test_weak_randomness.py +++ b/tests/appsec/iast/sink/test_weak_randomness.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. 
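The deduplication behaviour described in the docstring above is worth picturing from the weblog side: the same weak-hash call site executes several times in one request, yet IAST should report a single WEAK_HASH vulnerability. A hypothetical sketch of such an endpoint body (not this repo's actual weblog code):

    import hashlib

    def weak_hash_duplicates():
        # the same vulnerable line runs three times in one request;
        # IAST is expected to report the MD5 sink only once
        for _ in range(3):
            hashlib.md5(b"duplicated").hexdigest()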
from utils import features, weblog, rfc -from ..utils import BaseSinkTestWithoutTelemetry, validate_stack_traces +from ..utils import BaseSinkTestWithoutTelemetry, validate_extended_location_data, validate_stack_traces @features.iast_sink_weakrandomness @@ -34,3 +34,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestWeakRandomness_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "WEAK_RANDOMNESS" + + def setup_extended_location_data(self): + self.r = weblog.get("/iast/weak_randomness/test_insecure") + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_xcontent_sniffing.py b/tests/appsec/iast/sink/test_xcontent_sniffing.py index 77e996ca0d..8a9ae376e5 100644 --- a/tests/appsec/iast/sink/test_xcontent_sniffing.py +++ b/tests/appsec/iast/sink/test_xcontent_sniffing.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. from utils import context, missing_feature, features, rfc, weblog -from ..utils import BaseSinkTest, validate_stack_traces +from ..utils import BaseSinkTest, validate_extended_location_data, validate_stack_traces @features.iast_sink_xcontentsniffing @@ -37,3 +37,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class Test_XContentSniffing_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "XCONTENTTYPE_HEADER_MISSING" + + def setup_extended_location_data(self): + self.r = weblog.get("/iast/xcontent-missing-header/test_insecure") + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type, False) diff --git a/tests/appsec/iast/sink/test_xpath_injection.py b/tests/appsec/iast/sink/test_xpath_injection.py index 3add0f480b..481f0f9a1c 100644 --- a/tests/appsec/iast/sink/test_xpath_injection.py +++ b/tests/appsec/iast/sink/test_xpath_injection.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. from utils import features, weblog, rfc -from ..utils import BaseSinkTestWithoutTelemetry, validate_stack_traces +from ..utils import BaseSinkTestWithoutTelemetry, validate_extended_location_data, validate_stack_traces @features.iast_sink_xpathinjection @@ -30,3 +30,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestXPathInjection_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "XPATH_INJECTION" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/xpathi/test_insecure", data={"expression": "expression"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/sink/test_xss.py b/tests/appsec/iast/sink/test_xss.py index 74449793c7..d2735032c7 100644 --- a/tests/appsec/iast/sink/test_xss.py +++ b/tests/appsec/iast/sink/test_xss.py @@ -3,7 +3,7 @@ # Copyright 2021 Datadog, Inc. 
from utils import features, weblog, rfc -from ..utils import BaseSinkTestWithoutTelemetry, validate_stack_traces +from ..utils import BaseSinkTestWithoutTelemetry, validate_extended_location_data, validate_stack_traces @features.iast_sink_xss @@ -30,3 +30,17 @@ def setup_stack_trace(self): def test_stack_trace(self): validate_stack_traces(self.r) + + +@rfc("https://docs.google.com/document/d/1R8AIuQ9_rMHBPdChCb5jRwPrg1WvIz96c_WQ3y8DWk4") +@features.iast_extended_location +class TestXSS_ExtendedLocation: + """Test extended location data""" + + vulnerability_type = "XSS" + + def setup_extended_location_data(self): + self.r = weblog.post("/iast/xss/test_insecure", data={"param": "param"}) + + def test_extended_location_data(self): + validate_extended_location_data(self.r, self.vulnerability_type) diff --git a/tests/appsec/iast/source/test_cookie_value.py b/tests/appsec/iast/source/test_cookie_value.py index 800f0b0462..699b01dce2 100644 --- a/tests/appsec/iast/source/test_cookie_value.py +++ b/tests/appsec/iast/source/test_cookie_value.py @@ -2,7 +2,7 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. -from utils import context, bug, missing_feature, features +from utils import context, missing_feature, features from ..utils import BaseSourceTest diff --git a/tests/appsec/iast/source/test_graphql_resolver.py b/tests/appsec/iast/source/test_graphql_resolver.py index a80db68c26..ff29024b4e 100644 --- a/tests/appsec/iast/source/test_graphql_resolver.py +++ b/tests/appsec/iast/source/test_graphql_resolver.py @@ -2,7 +2,7 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. -from utils import context, missing_feature, bug, features, scenarios +from utils import features, scenarios from ..utils import BaseSourceTest diff --git a/tests/appsec/iast/source/test_header_value.py b/tests/appsec/iast/source/test_header_value.py index 579ea92aec..f31dcbfdf5 100644 --- a/tests/appsec/iast/source/test_header_value.py +++ b/tests/appsec/iast/source/test_header_value.py @@ -2,7 +2,7 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. -from utils import context, bug, missing_feature, features +from utils import context, missing_feature, features from ..utils import BaseSourceTest diff --git a/tests/appsec/iast/source/test_kafka_key.py b/tests/appsec/iast/source/test_kafka_key.py index 06530135c7..a45dc73fef 100644 --- a/tests/appsec/iast/source/test_kafka_key.py +++ b/tests/appsec/iast/source/test_kafka_key.py @@ -2,7 +2,7 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. -from utils import features, scenarios, flaky, context +from utils import features, scenarios from ..utils import BaseSourceTest, get_all_iast_events, get_iast_sources diff --git a/tests/appsec/iast/source/test_kafka_value.py b/tests/appsec/iast/source/test_kafka_value.py index eefde19671..0b22493f15 100644 --- a/tests/appsec/iast/source/test_kafka_value.py +++ b/tests/appsec/iast/source/test_kafka_value.py @@ -2,7 +2,7 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. 
-from utils import features, scenarios, flaky, context
+from utils import features, scenarios
 from ..utils import BaseSourceTest, get_all_iast_events, get_iast_sources
 
 
diff --git a/tests/appsec/iast/source/test_parameter_name.py b/tests/appsec/iast/source/test_parameter_name.py
index bdfc00f9e1..b0099b9a7e 100644
--- a/tests/appsec/iast/source/test_parameter_name.py
+++ b/tests/appsec/iast/source/test_parameter_name.py
@@ -2,7 +2,7 @@
 # This product includes software developed at Datadog (https://www.datadoghq.com/).
 # Copyright 2021 Datadog, Inc.
 
-from utils import context, missing_feature, bug, features, flaky
+from utils import context, missing_feature, bug, features
 from ..utils import BaseSourceTest
 
 
@@ -27,9 +27,8 @@ class TestParameterName(BaseSourceTest):
     )
     @bug(weblog_variant="resteasy-netty3", reason="APPSEC-55687")
     @missing_feature(library="dotnet", reason="Tainted as request body")
-    @flaky(context.weblog_variant == "python3.12", reason="APPSEC-56375")
     def test_source_post_reported(self):
-        """for use case where only one is reported, we want to keep a test on the one reported"""
+        """For use case where only one is reported, we want to keep a test on the one reported"""
         self.validate_request_reported(self.requests["POST"])
 
     setup_source_get_reported = BaseSourceTest.setup_source_reported
@@ -40,7 +39,7 @@ def test_source_post_reported(self):
     )
     @bug(weblog_variant="resteasy-netty3", reason="APPSEC-55687")
     def test_source_get_reported(self):
-        """for use case where only one is reported, we want to keep a test on the one reported"""
+        """For use case where only one is reported, we want to keep a test on the one reported"""
         self.validate_request_reported(self.requests["GET"])
 
     @missing_feature(
@@ -53,7 +52,6 @@ def test_source_get_reported(self):
     )
     @bug(weblog_variant="resteasy-netty3", reason="APPSEC-55687")
     @missing_feature(library="dotnet", reason="Tainted as request body")
-    @flaky(context.weblog_variant == "python3.12", reason="APPSEC-56375")
     def test_source_reported(self):
         super().test_source_reported()
 
diff --git a/tests/appsec/iast/utils.py b/tests/appsec/iast/utils.py
index 23a94f5217..e7756b8634 100644
--- a/tests/appsec/iast/utils.py
+++ b/tests/appsec/iast/utils.py
@@ -34,6 +34,7 @@ def get_iast_event(request):
 def assert_iast_vulnerability(
     request, vulnerability_count=None, vulnerability_type=None, expected_location=None, expected_evidence=None
 ):
+    old_version = context.library < "java@1.47.0"
     iast = get_iast_event(request=request)
     assert iast["vulnerabilities"], "Expected at least one vulnerability"
     vulns = iast["vulnerabilities"]
@@ -41,7 +42,12 @@ def assert_iast_vulnerability(
         vulns = [v for v in vulns if v["type"] == vulnerability_type]
         assert vulns, f"No vulnerability of type {vulnerability_type}"
     if expected_location:
-        vulns = [v for v in vulns if v.get("location", {}).get("path", "") == expected_location]
+        vulns = [
+            v
+            for v in vulns
+            if (old_version and v.get("location", {}).get("path", "") == expected_location)
+            or (v.get("location", {}).get("class", "") == expected_location)
+        ]
         assert vulns, f"No vulnerability with location {expected_location}"
     if expected_evidence:
         vulns = [v for v in vulns if v.get("evidence", {}).get("value", "") == expected_evidence]
@@ -85,9 +91,13 @@ def get_all_iast_events():
 
 
 def get_iast_sources(iast_events):
-    sources = [event.get("sources") for event in iast_events if event.get("sources")]
+    sources: list = []
+
+    for event in iast_events:
+        sources.extend(event.get("sources", []))
+
     assert sources, "No sources found"
-    sources = sum(sources, [])  # set all the sources in a single list
+
     return sources
 
 
@@ -199,9 +209,12 @@ def validate_stack_traces(request):
 
     iast = meta["_dd.iast.json"]
     assert iast["vulnerabilities"], "Expected at least one vulnerability"
 
-    stack_trace = span["meta_struct"]["_dd.stack"]["vulnerability"][0]
-    vulns = [i for i in iast["vulnerabilities"] if i["stackId"] == stack_trace["id"]]
-    assert len(vulns) == 1, "Expected a single vulnerability with the stack trace Id"
+    stack_traces = span["meta_struct"]["_dd.stack"]["vulnerability"]
+    stack_trace = stack_traces[0]
+    vulns = [i for i in iast["vulnerabilities"] if i.get("stackId") == stack_trace["id"]]
+    assert (
+        len(vulns) == 1
+    ), f"Expected a single vulnerability with the stack trace Id.\nVulnerabilities: {vulns}\nStack trace: {stack_traces}"
     vuln = vulns[0]
 
     assert vuln["stackId"], "no 'stack_id's present'"
@@ -227,29 +240,126 @@ def validate_stack_traces(request):
     assert "frames" in stack_trace, "'frames' not found in stack trace"
     assert len(stack_trace["frames"]) <= 32, "stack trace above size limit (32 frames)"
 
-    # Vulns without location path are not expected to have a stack trace
+    # Vulns without location path/class are not expected to have a stack trace
+    old_version = context.library < "java@1.47.0"
     location = vuln["location"]
-    assert location is not None and "path" in location, "This vulnerability is not expected to have a stack trace"
+    if old_version:
+        assert (
+            location is not None and "path" in location
+        ), "This vulnerability was expected to have a path in its location data, it is not possible to validate the stack trace"
+    else:
+        assert (
+            location is not None and "class" in location
+        ), "This vulnerability was expected to have a class in its location data, it is not possible to validate the stack trace"
 
     locationFrame = None
+
     for frame in stack_trace["frames"]:
         # We are looking for the frame that corresponds to the location of the vulnerability; we will need to update this to cover all tracers
         # currently supported: Java, Python, Node.js
         if (
-            stack_trace["language"] == "java"
-            and (
-                location["path"] in frame["class_name"]
-                and location["method"] in frame["function"]
-                and location["line"] == frame["line"]
+            (
+                stack_trace["language"] == "java"
+                and old_version
+                and (
+                    location["path"] in frame["class_name"]
+                    and location["method"] in frame["function"]
+                    and location["line"] == frame["line"]
+                )
+            )
+            or (
+                stack_trace["language"] == "java"
+                and (
+                    location["class"] in frame["class_name"]
+                    and location["method"] in frame["function"]
+                    and location["line"] == frame["line"]
+                )
+            )
+            or (
+                stack_trace["language"] in ("python", "nodejs")
+                and (frame.get("file", "").endswith(location["path"]) and location["line"] == frame["line"])
             )
-        ) or (
-            stack_trace["language"] in ("python", "nodejs")
-            and (frame.get("file", "").endswith(location["path"]) and location["line"] == frame["line"])
         ):
             locationFrame = frame
 
     assert locationFrame is not None, "location not found in stack trace"
 
 
+def validate_extended_location_data(request, vulnerability_type, is_expected_location_required=True):
+    spans = [span for _, span in interfaces.library.get_root_spans(request=request)]
+    assert spans, "No root span found"
+    span = spans[0]
+
+    iast = span.get("meta", {}).get("_dd.iast.json")
+    assert iast and iast["vulnerabilities"], "Expected at least one vulnerability"
+
+    # Filter by vulnerability type
+    vulns = [v for v in iast["vulnerabilities"] if v["type"] == vulnerability_type]
+    assert vulns, f"No vulnerability of type {vulnerability_type}"
+
+    if not is_expected_location_required:
+        return
+
+    vuln = vulns[0]
+    location = vuln["location"]
+
+    # Check extended data against the stack trace if one exists
+    if "meta_struct" in span and "_dd.stack" in span["meta_struct"]:
+        assert "vulnerability" in span["meta_struct"]["_dd.stack"], "'vulnerability' not found in '_dd.stack'"
+        stack_trace = span["meta_struct"]["_dd.stack"]["vulnerability"][0]
+
+        assert "language" in stack_trace
+        assert stack_trace["language"] in (
+            "php",
+            "python",
+            "nodejs",
+            "java",
+            "dotnet",
+            "go",
+            "ruby",
+        ), "unexpected language"
+        assert "frames" in stack_trace
+
+        # Verify a frame matches the location
+        location_match = False
+        for frame in stack_trace["frames"]:
+            if (
+                frame.get("file", "").endswith(location["path"])
+                and location["line"] == frame["line"]
+                and location.get("class", "") == frame.get("class_name", "")
+                and location.get("method", "") == frame.get("function", "")
+            ):
+                location_match = True
+                break
+
+        assert location_match, "location not found in stack trace"
+    # Otherwise check the extended data on the location itself
+    else:
+        assert all(field in location for field in ["path", "line"])
+
+        if context.library.library not in ("python", "nodejs"):
+            assert all(field in location for field in ["class", "method"])
+
+
+def get_hardcoded_vulnerabilities(vulnerability_type):
+    spans = [s for _, s in interfaces.library.get_root_spans()]
+    assert spans, "No spans found"
+    spans_meta = [span.get("meta") for span in spans]
+    assert spans_meta, "No spans meta found"
+    iast_events = [meta.get("_dd.iast.json") for meta in spans_meta if meta.get("_dd.iast.json")]
+    assert iast_events, "No iast events found"
+
+    vulnerabilities: list = []
+    for event in iast_events:
+        vulnerabilities.extend(event.get("vulnerabilities", []))
+
+    assert vulnerabilities, "No vulnerabilities found"
+
+    hardcoded_vulns = [vuln for vuln in vulnerabilities if vuln.get("type") == vulnerability_type]
+    assert hardcoded_vulns, "No hardcoded vulnerabilities found"
+    return hardcoded_vulns
+
+
 class BaseSinkTest(BaseSinkTestWithoutTelemetry):
     def setup_telemetry_metric_instrumented_sink(self):
         self.setup_insecure()
@@ -365,8 +475,7 @@ def check_test_telemetry_should_execute(self):
 
     def get_sources(self, request):
         iast = get_iast_event(request=request)
-        sources = iast["sources"]
-        return sources
+        return iast["sources"]
 
     def validate_request_reported(self, request, source_type=None):
         if source_type is None:  # allow to overwrite source_type for parameter value node's use case
diff --git a/tests/appsec/rasp/rasp_non_blocking_ruleset.json b/tests/appsec/rasp/rasp_non_blocking_ruleset.json
new file mode 100644
index 0000000000..2911683177
--- /dev/null
+++ b/tests/appsec/rasp/rasp_non_blocking_ruleset.json
@@ -0,0 +1,282 @@
+{
+  "version": "2.1",
+  "metadata": {
+    "rules_version": "1.99.0"
+  },
+  "rules": [
+    {
+      "id": "rasp-930-111",
+      "name": "Local file inclusion exploit",
+      "tags": {
+        "type": "lfi",
+        "category": "vulnerability_trigger",
+        "cwe": "22",
+        "capec": "1000/255/153/126",
+        "confidence": "0",
+        "module": "rasp"
+      },
+      "conditions": [
+        {
+          "parameters": {
+            "inputs": [
+              {
+                "address": "server.request.query"
+              }
+            ],
+            "regex": "asdnjakslnbdklasbdkasbdkl"
+          },
+          "operator": "match_regex"
+        }
+      ],
+      "transformers": [],
+      "on_match": [
+        "stack_trace"
+      ]
+    },
+
+    {
+      "id": "rasp-930-100",
+      "name": "Local file inclusion exploit",
+      "tags": {
+        "type": "lfi",
+        "category":
"vulnerability_trigger", + "cwe": "22", + "capec": "1000/255/153/126", + "confidence": "0", + "module": "rasp" + }, + "conditions": [ + { + "parameters": { + "resource": [ + { + "address": "server.io.fs.file" + } + ], + "params": [ + { + "address": "server.request.query" + }, + { + "address": "server.request.body" + }, + { + "address": "server.request.path_params" + }, + { + "address": "grpc.server.request.message" + }, + { + "address": "graphql.server.all_resolvers" + }, + { + "address": "graphql.server.resolver" + } + ] + }, + "operator": "lfi_detector" + } + ], + "transformers": [], + "on_match": [ + "stack_trace" + ] + }, + { + "id": "rasp-934-100", + "name": "Server-side request forgery exploit", + "tags": { + "type": "ssrf", + "category": "vulnerability_trigger", + "cwe": "918", + "capec": "1000/225/115/664", + "confidence": "0", + "module": "rasp" + }, + "conditions": [ + { + "parameters": { + "resource": [ + { + "address": "server.io.net.url" + } + ], + "params": [ + { + "address": "server.request.query" + }, + { + "address": "server.request.body" + }, + { + "address": "server.request.path_params" + }, + { + "address": "grpc.server.request.message" + }, + { + "address": "graphql.server.all_resolvers" + }, + { + "address": "graphql.server.resolver" + } + ] + }, + "operator": "ssrf_detector" + } + ], + "transformers": [], + "on_match": [ + "stack_trace" + ] + }, + { + "id": "rasp-942-100", + "name": "SQL injection exploit", + "tags": { + "type": "sql_injection", + "category": "vulnerability_trigger", + "cwe": "89", + "capec": "1000/152/248/66", + "confidence": "0", + "module": "rasp" + }, + "conditions": [ + { + "parameters": { + "resource": [ + { + "address": "server.db.statement" + } + ], + "params": [ + { + "address": "server.request.query" + }, + { + "address": "server.request.body" + }, + { + "address": "server.request.path_params" + }, + { + "address": "graphql.server.all_resolvers" + }, + { + "address": "graphql.server.resolver" + } + ], + "db_type": [ + { + "address": "server.db.system" + } + ] + }, + "operator": "sqli_detector" + } + ], + "transformers": [], + "on_match": [ + "stack_trace" + ] + }, + { + "id": "rasp-932-100", + "name": "Shell injection exploit", + "enabled": true, + "tags": { + "type": "command_injection", + "category": "vulnerability_trigger", + "cwe": "77", + "capec": "1000/152/248/88", + "confidence": "0", + "module": "rasp" + }, + "conditions": [ + { + "parameters": { + "resource": [ + { + "address": "server.sys.shell.cmd" + } + ], + "params": [ + { + "address": "server.request.query" + }, + { + "address": "server.request.body" + }, + { + "address": "server.request.path_params" + }, + { + "address": "grpc.server.request.message" + }, + { + "address": "graphql.server.all_resolvers" + }, + { + "address": "graphql.server.resolver" + } + ] + }, + "operator": "shi_detector" + } + ], + "transformers": [], + "on_match": [ + "stack_trace" + ] + }, + { + "id": "rasp-932-110", + "name": "OS command injection exploit", + "enabled": true, + "tags": { + "type": "command_injection", + "category": "vulnerability_trigger", + "cwe": "77", + "capec": "1000/152/248/88", + "confidence": "0", + "module": "rasp" + }, + "conditions": [ + { + "parameters": { + "resource": [ + { + "address": "server.sys.exec.cmd" + } + ], + "params": [ + { + "address": "server.request.query" + }, + { + "address": "server.request.body" + }, + { + "address": "server.request.path_params" + }, + { + "address": "grpc.server.request.message" + }, + { + "address": "graphql.server.all_resolvers" + }, + 
{ + "address": "graphql.server.resolver" + } + ] + }, + "operator": "cmdi_detector" + } + ], + "transformers": [], + "on_match": [ + "stack_trace" + ] + } + ] +} diff --git a/tests/appsec/rasp/test_lfi.py b/tests/appsec/rasp/test_lfi.py index 3af0533e8e..dceaf99e11 100644 --- a/tests/appsec/rasp/test_lfi.py +++ b/tests/appsec/rasp/test_lfi.py @@ -134,6 +134,22 @@ def test_lfi_span_tags(self): validate_span_tags(self.r, expected_metrics=["_dd.appsec.rasp.duration_ext", "_dd.appsec.rasp.rule.eval"]) +@rfc("https://docs.google.com/document/d/1vmMqpl8STDk7rJnd3YBsa6O9hCls_XHHdsodD61zr_4/edit#heading=h.96mezjnqf46y") +@features.rasp_span_tags +@features.rasp_local_file_inclusion +@scenarios.appsec_rasp_non_blocking +class Test_Lfi_Telemetry_Multiple_Exploits: + """Validate rasp match telemetry metric works""" + + def setup_rasp_match_tag(self): + self.r = weblog.get("/rasp/multiple", params={"file1": "../etc/passwd", "file2": "../etc/group"}) + + def test_rasp_match_tag(self): + series_eval = find_series(True, "appsec", "rasp.rule.match") + assert series_eval + assert series_eval[0]["points"][0][1] == 3.0 + + @rfc("https://docs.google.com/document/d/1vmMqpl8STDk7rJnd3YBsa6O9hCls_XHHdsodD61zr_4/edit#heading=h.enmf90juqidf") @features.rasp_stack_trace @features.rasp_local_file_inclusion diff --git a/tests/appsec/rasp/test_sqli.py b/tests/appsec/rasp/test_sqli.py index 6578158dd4..4de2cb4fa3 100644 --- a/tests/appsec/rasp/test_sqli.py +++ b/tests/appsec/rasp/test_sqli.py @@ -2,7 +2,7 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. -from utils import features, weblog, interfaces, scenarios, rfc, context +from utils import features, weblog, interfaces, scenarios, rfc from utils.dd_constants import Capabilities from tests.appsec.rasp.utils import ( validate_span_tags, diff --git a/tests/appsec/rasp/utils.py b/tests/appsec/rasp/utils.py index 1bffabb896..8a29b88cb0 100644 --- a/tests/appsec/rasp/utils.py +++ b/tests/appsec/rasp/utils.py @@ -7,7 +7,7 @@ from utils import interfaces -def validate_span_tags(request, expected_meta=[], expected_metrics=[]): +def validate_span_tags(request, expected_meta=(), expected_metrics=()): """Validate RASP span tags are added when an event is generated""" spans = [s for _, s in interfaces.library.get_root_spans(request=request)] assert spans, "No spans to validate" @@ -84,20 +84,20 @@ def find_series(is_metrics: bool, namespace, metric): return series -def validate_metric(name, type, metric): +def validate_metric(name, metric_type, metric): return ( metric.get("metric") == name and metric.get("type") == "count" - and f"rule_type:{type}" in metric.get("tags", ()) + and f"rule_type:{metric_type}" in metric.get("tags", ()) and any(s.startswith("waf_version:") for s in metric.get("tags", ())) ) -def validate_metric_variant(name, type, variant, metric): +def validate_metric_variant(name, metric_type, variant, metric): return ( metric.get("metric") == name and metric.get("type") == "count" - and f"rule_type:{type}" in metric.get("tags", ()) + and f"rule_type:{metric_type}" in metric.get("tags", ()) and f"rule_variant:{variant}" in metric.get("tags", ()) and any(s.startswith("waf_version:") for s in metric.get("tags", ())) ) diff --git a/tests/appsec/test_asm_standalone.py b/tests/appsec/test_asm_standalone.py index 45328214c7..dc1eabcd14 100644 --- a/tests/appsec/test_asm_standalone.py +++ b/tests/appsec/test_asm_standalone.py @@ -41,24 +41,23 @@ def _assert_tags_value(span, obj, expected_tags): for tag, 
value in expected_tags.items(): if value is None: assert tag not in struct + elif tag == "_sampling_priority_v1": # special case, it's a lambda to check for a condition + assert value(struct[tag]) else: - if tag == "_sampling_priority_v1": # special case, it's a lambda to check for a condition - assert value(struct[tag]) - else: - assert struct[tag] == value + assert struct[tag] == value # Case 1: The tags are set on the first span of every trace chunk try: _assert_tags_value(first_trace, obj, expected_tags) return True - except (KeyError, AssertionError) as e: + except (KeyError, AssertionError): pass # should try the second case # Case 2: The tags are set on the local root span try: _assert_tags_value(span, obj, expected_tags) return True - except (KeyError, AssertionError) as e: + except (KeyError, AssertionError): return False @staticmethod @@ -703,7 +702,7 @@ def test_telemetry_sca_enabled_propagated(self): DD_APPSEC_SCA_ENABLED = TelemetryUtils.get_dd_appsec_sca_enabled_str(context.library) cfg_appsec_enabled = configuration_by_name.get(DD_APPSEC_SCA_ENABLED) - assert cfg_appsec_enabled is not None, "Missing telemetry config item for '{}'".format(DD_APPSEC_SCA_ENABLED) + assert cfg_appsec_enabled is not None, f"Missing telemetry config item for '{DD_APPSEC_SCA_ENABLED}'" outcome_value = True if context.library == "java": @@ -741,6 +740,32 @@ def test_app_dependencies_loaded(self): raise Exception(dependency + " not received in app-dependencies-loaded message") +@rfc("https://docs.google.com/document/d/12NBx-nD-IoQEMiCRnJXneq4Be7cbtSc6pJLOFUWTpNE/edit") +@features.appsec_standalone +@scenarios.appsec_no_stats +class Test_AppSecStandalone_NotEnabled: + """Test expected behaviour when standalone is not enabled.""" + + def setup_client_computed_stats_header_is_not_present(self): + trace_id = 1212121212121212122 + parent_id = 34343434 + self.r = weblog.get( + "/", + headers={ + "x-datadog-trace-id": str(trace_id), + "x-datadog-parent-id": str(parent_id), + }, + ) + + def test_client_computed_stats_header_is_not_present(self): + spans_checked = 0 + for data, _, span in interfaces.library.get_spans(request=self.r): + assert span["trace_id"] == 1212121212121212122 + assert "datadog-client-computed-stats" not in [x.lower() for x, y in data["request"]["headers"]] + spans_checked += 1 + assert spans_checked == 1 + + @rfc("https://docs.google.com/document/d/12NBx-nD-IoQEMiCRnJXneq4Be7cbtSc6pJLOFUWTpNE/edit") @features.appsec_standalone @scenarios.appsec_standalone @@ -761,10 +786,10 @@ class Test_AppSecStandalone_UpstreamPropagation_V2(AppSecStandalone_UpstreamProp """APPSEC correctly propagates AppSec events in distributing tracing with DD_APM_TRACING_ENABLED=false.""" def propagated_tag(self): - return "_dd.p.appsec" + return "_dd.p.ts" def propagated_tag_value(self): - return "1" + return "02" @rfc("https://docs.google.com/document/d/12NBx-nD-IoQEMiCRnJXneq4Be7cbtSc6pJLOFUWTpNE/edit") @@ -800,10 +825,10 @@ class Test_SCAStandalone_Telemetry(SCAStandalone_Telemetry_Base): """Tracer correctly propagates SCA telemetry in distributing tracing with DD_EXPERIMENTAL_APPSEC_STANDALONE_ENABLED=true.""" def propagated_tag(self): - return "_dd.p.ts" + return "_dd.p.appsec" def propagated_tag_value(self): - return "02" + return "1" @rfc("https://docs.google.com/document/d/12NBx-nD-IoQEMiCRnJXneq4Be7cbtSc6pJLOFUWTpNE/edit") diff --git a/tests/appsec/test_automated_login_events.py b/tests/appsec/test_automated_login_events.py index 714d2c2050..d7b676446c 100644 --- 
a/tests/appsec/test_automated_login_events.py +++ b/tests/appsec/test_automated_login_events.py @@ -66,7 +66,7 @@ def login_data(context, username, password): @rfc("https://docs.google.com/document/d/1-trUpphvyZY7k5ldjhW-MgqWl0xOm7AMEQDJEAZ63_Q/edit#heading=h.8d3o7vtyu1y1") @features.user_monitoring class Test_Login_Events: - "Test login success/failure use cases" + """Test login success/failure use cases""" # User entries in the internal DB: # users = [ @@ -301,7 +301,7 @@ def test_login_sdk_failure_basic(self): @scenarios.appsec_auto_events_extended @features.user_monitoring class Test_Login_Events_Extended: - "Test login success/failure use cases" + """Test login success/failure use cases""" def setup_login_success_local(self): self.r_success = weblog.post("/login?auth=local", data=login_data(context, USER, PASSWORD)) @@ -557,7 +557,7 @@ def test_login_success_headers(self): def validate_login_success_headers(span): if span.get("parent_id") not in (0, None): - return + return None for header in HEADERS: assert f"http.request.headers.{header.lower()}" in span["meta"], f"Can't find {header} in span's meta" @@ -581,7 +581,7 @@ def test_login_failure_headers(self): def validate_login_failure_headers(span): if span.get("parent_id") not in (0, None): - return + return None for header in HEADERS: assert f"http.request.headers.{header.lower()}" in span["meta"], f"Can't find {header} in span's meta" @@ -594,8 +594,7 @@ def validate_login_failure_headers(span): @features.user_monitoring @features.user_id_collection_modes class Test_V2_Login_Events: - """ - Test login success/failure use cases + """Test login success/failure use cases By default, mode is identification """ @@ -1088,7 +1087,7 @@ def test_login_success_headers(self): def validate_login_success_headers(span): if span.get("parent_id") not in (0, None): - return + return None for header in HEADERS: assert f"http.request.headers.{header.lower()}" in span["meta"], f"Can't find {header} in span's meta" @@ -1109,7 +1108,7 @@ def test_login_failure_headers(self): def validate_login_failure_headers(span): if span.get("parent_id") not in (0, None): - return + return None for header in HEADERS: assert f"http.request.headers.{header.lower()}" in span["meta"], f"Can't find {header} in span's meta" @@ -1230,8 +1229,7 @@ def validate_iden(meta): @features.user_monitoring @features.user_id_collection_modes class Test_V3_Login_Events: - """ - Test login success/failure use cases + """Test login success/failure use cases By default, mode is identification """ @@ -1517,7 +1515,7 @@ def test_login_success_headers(self): def validate_login_success_headers(span): if span.get("parent_id") not in (0, None): - return + return None for header in HEADERS: assert f"http.request.headers.{header.lower()}" in span["meta"], f"Can't find {header} in span's meta" @@ -1538,7 +1536,7 @@ def test_login_failure_headers(self): def validate_login_failure_headers(span): if span.get("parent_id") not in (0, None): - return + return None for header in HEADERS: assert f"http.request.headers.{header.lower()}" in span["meta"], f"Can't find {header} in span's meta" @@ -1972,7 +1970,6 @@ def setup_login_event_blocking_sdk(self): "/login?auth=local&sdk_event=success&sdk_user=sdkUser", data=login_data(context, UUID_USER, PASSWORD) ) - @missing_feature(context.library == "nodejs", reason="SDK blocking not implemented") def test_login_event_blocking_sdk(self): assert self.config_state_1[rc.RC_STATE] == rc.ApplyState.ACKNOWLEDGED assert self.r_login.status_code == 200 diff --git 
a/tests/appsec/test_blocking_addresses.py b/tests/appsec/test_blocking_addresses.py index 83c87b7a4c..0e357cb965 100644 --- a/tests/appsec/test_blocking_addresses.py +++ b/tests/appsec/test_blocking_addresses.py @@ -48,7 +48,7 @@ def setup_blocking(self): self.rm_req_block = weblog.get(headers={"X-Forwarded-For": "1.1.1.1"}) def test_blocking(self): - """can block the request forwarded for the ip""" + """Can block the request forwarded for the ip""" assert self.rm_req_block.status_code == 403 interfaces.library.assert_waf_attack(self.rm_req_block, rule="blk-001-001") @@ -73,7 +73,7 @@ def setup_block_user(self): self.rm_req_block = weblog.get("/users", params={"user": "blockedUser"}) def test_block_user(self): - """can block the request from the user""" + """Can block the request from the user""" assert self.rm_req_block.status_code == 403 interfaces.library.assert_waf_attack(self.rm_req_block, rule="block-users") @@ -474,7 +474,7 @@ def setup_blocking(self): self.rbmp_req = weblog.post("/waf", files={"foo": (None, "bsldhkuqwgervf")}) def test_blocking(self): - """can block on server.request.body (multipart/form-data variant)""" + """Can block on server.request.body (multipart/form-data variant)""" interfaces.library.assert_waf_attack(self.rbmp_req, rule="tst-037-004") assert self.rbmp_req.status_code == 403 @@ -535,7 +535,7 @@ def setup_not_found(self): reason="The endpoint /finger_print is not implemented in the weblog", ) def test_not_found(self): - """can block on server.response.status""" + """Can block on server.response.status""" interfaces.library.assert_waf_attack(self.rnf_req, rule="tst-037-010") assert self.rnf_req.status_code == 403 @@ -551,9 +551,9 @@ class Test_Blocking_response_headers: def setup_blocking(self): if not hasattr(self, "rm_req_block1") or self.rm_req_block1 is None: - self.rm_req_block1 = weblog.get(f"/tag_value/anything/200?content-language=fo-fo") + self.rm_req_block1 = weblog.get("/tag_value/anything/200?content-language=fo-fo") if not hasattr(self, "rm_req_block2") or self.rm_req_block2 is None: - self.rm_req_block2 = weblog.get(f"/tag_value/anything/200?content-language=krypton") + self.rm_req_block2 = weblog.get("/tag_value/anything/200?content-language=krypton") @missing_feature( context.scenario is scenarios.external_processing_blocking, @@ -567,8 +567,8 @@ def test_blocking(self): def setup_non_blocking(self): self.setup_blocking() - self.rm_req_nonblock1 = weblog.get(f"/tag_value/anything/200?content-color=fo-fo") - self.rm_req_nonblock2 = weblog.get(f"/tag_value/anything/200?content-language=fr") + self.rm_req_nonblock1 = weblog.get("/tag_value/anything/200?content-color=fo-fo") + self.rm_req_nonblock2 = weblog.get("/tag_value/anything/200?content-language=fr") @missing_feature( context.scenario is scenarios.external_processing_blocking, @@ -589,7 +589,7 @@ class Test_Suspicious_Request_Blocking: def setup_blocking(self): self.rm_req_block = weblog.get( - f"/tag_value/malicious-path-cGDgSRJvklxGOKMTNfQMViBPpKAvpFoc_malicious-uri-ypMrmzrWATkLrPKLblvpRGGltBSgHWrK/200?attack=malicious-query-SAGihOkuSwXXFDXNqAWJzNuZEdKNunrJ", + "/tag_value/malicious-path-cGDgSRJvklxGOKMTNfQMViBPpKAvpFoc_malicious-uri-ypMrmzrWATkLrPKLblvpRGGltBSgHWrK/200?attack=malicious-query-SAGihOkuSwXXFDXNqAWJzNuZEdKNunrJ", cookies={"foo": "malicious-cookie-PwXuEQEdeAjzWpCDqAzPqiUAdXJMHwtS"}, headers={"content-type": "text/plain", "client": "malicious-header-kCgvxrYeiwUSYkAuniuGktdvzXYEPSff"}, ) @@ -612,7 +612,7 @@ def setup_blocking_before(self): cookies={"foo": 
"malicious-cookie-PwXuEQEdeAjzWpCDqAzPqiUAdXJMHwtS"}, ) self.block_req2 = weblog.get( - f"/tag_value/malicious-path-cGDgSRJvklxGOKMTNfQMViBPpKAvpFoc_malicious-uri-ypMrmzrWATkLrPKLblvpRGGltBSgHWrK/200?attack=malicious-query-SAGihOkuSwXXFDXNqAWJzNuZEdKNunrJ", + "/tag_value/malicious-path-cGDgSRJvklxGOKMTNfQMViBPpKAvpFoc_malicious-uri-ypMrmzrWATkLrPKLblvpRGGltBSgHWrK/200?attack=malicious-query-SAGihOkuSwXXFDXNqAWJzNuZEdKNunrJ", cookies={"foo": "malicious-cookie-PwXuEQEdeAjzWpCDqAzPqiUAdXJMHwtS"}, headers={"content-type": "text/plain", "client": "malicious-header-kCgvxrYeiwUSYkAuniuGktdvzXYEPSff"}, ) @@ -637,7 +637,7 @@ def test_blocking_before(self): def setup_blocking_without_path_params(self): self.rm_req_block = weblog.get( - f"/tag_value/path_param_malicious-uri-wX1GdUiWdVdoklf0pYBi5kQApO9i77tN/200?attack=malicious-query-T3d1nKdkTWIG03q03ix9c9UlhbGigvwQ", + "/tag_value/path_param_malicious-uri-wX1GdUiWdVdoklf0pYBi5kQApO9i77tN/200?attack=malicious-query-T3d1nKdkTWIG03q03ix9c9UlhbGigvwQ", cookies={"foo": "malicious-cookie-qU4sV2r6ac2nfETV7aJP9Fdt1NaWC9wB"}, headers={"content-type": "text/plain", "client": "malicious-header-siDzyETAdkvKahD3PxlvIqcE0fMIVywE"}, ) @@ -654,7 +654,7 @@ def setup_blocking_before_without_path_params(self): cookies={"foo": "malicious-cookie-qU4sV2r6ac2nfETV7aJP9Fdt1NaWC9wB"}, ) self.block_req2 = weblog.get( - f"/tag_value/path_param_malicious-uri-wX1GdUiWdVdoklf0pYBi5kQApO9i77tN/200?attack=malicious-query-T3d1nKdkTWIG03q03ix9c9UlhbGigvwQ", + "/tag_value/path_param_malicious-uri-wX1GdUiWdVdoklf0pYBi5kQApO9i77tN/200?attack=malicious-query-T3d1nKdkTWIG03q03ix9c9UlhbGigvwQ", cookies={"foo": "malicious-cookie-qU4sV2r6ac2nfETV7aJP9Fdt1NaWC9wB"}, headers={"content-type": "text/plain", "client": "malicious-header-siDzyETAdkvKahD3PxlvIqcE0fMIVywE"}, ) diff --git a/tests/appsec/test_conf.py b/tests/appsec/test_conf.py index ec20fb6b91..0059d27a65 100644 --- a/tests/appsec/test_conf.py +++ b/tests/appsec/test_conf.py @@ -2,7 +2,7 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. 
-from utils import weblog, context, interfaces, missing_feature, irrelevant, rfc, scenarios, features, flaky, waf_rules +from utils import weblog, context, interfaces, missing_feature, irrelevant, rfc, scenarios, features from utils.tools import nested_lookup from utils.dd_constants import PYTHON_RELEASE_GA_1_1 @@ -24,7 +24,7 @@ def setup_enabled(self): self.r_enabled = weblog.get("/waf/", headers={"User-Agent": "Arachni/v1"}) def test_enabled(self): - """test DD_APPSEC_ENABLED = true""" + """Test DD_APPSEC_ENABLED = true""" interfaces.library.assert_waf_attack(self.r_enabled) def setup_disabled(self): @@ -34,7 +34,7 @@ def setup_disabled(self): @missing_feature("sinatra" in context.weblog_variant, reason="Sinatra endpoint not implemented") @scenarios.everything_disabled def test_disabled(self): - """test DD_APPSEC_ENABLED = false""" + """Test DD_APPSEC_ENABLED = false""" assert self.r_disabled.status_code == 200 interfaces.library.assert_no_appsec_event(self.r_disabled) @@ -43,7 +43,7 @@ def setup_appsec_rules(self): @scenarios.appsec_custom_rules def test_appsec_rules(self): - """test DD_APPSEC_RULES = custom rules file""" + """Test DD_APPSEC_RULES = custom rules file""" interfaces.library.assert_waf_attack(self.r_appsec_rules, pattern="dedicated-value-for-testing-purpose") def setup_waf_timeout(self): @@ -59,7 +59,7 @@ def setup_waf_timeout(self): @missing_feature("sinatra" in context.weblog_variant, reason="Sinatra endpoint not implemented") @scenarios.appsec_low_waf_timeout def test_waf_timeout(self): - """test DD_APPSEC_WAF_TIMEOUT = low value""" + """Test DD_APPSEC_WAF_TIMEOUT = low value""" assert self.r_waf_timeout.status_code == 200 interfaces.library.assert_no_appsec_event(self.r_waf_timeout) @@ -70,7 +70,7 @@ def setup_obfuscation_parameter_key(self): @missing_feature(context.library < f"python@{PYTHON_RELEASE_GA_1_1}") @scenarios.appsec_custom_obfuscation def test_obfuscation_parameter_key(self): - """test DD_APPSEC_OBFUSCATION_PARAMETER_KEY_REGEXP""" + """Test DD_APPSEC_OBFUSCATION_PARAMETER_KEY_REGEXP""" def validate_appsec_span_tags(span, appsec_data): # pylint: disable=unused-argument assert not nested_lookup( @@ -88,7 +88,7 @@ def setup_obfuscation_parameter_value(self): @missing_feature(context.library < f"python@{PYTHON_RELEASE_GA_1_1}") @scenarios.appsec_custom_obfuscation def test_obfuscation_parameter_value(self): - """test DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP""" + """Test DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP""" def validate_appsec_span_tags(span, appsec_data): # pylint: disable=unused-argument assert not nested_lookup( diff --git a/tests/appsec/test_customconf.py b/tests/appsec/test_customconf.py index d9c5278543..f3331bbe1a 100644 --- a/tests/appsec/test_customconf.py +++ b/tests/appsec/test_customconf.py @@ -2,7 +2,7 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. 
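The test_conf.py cases above all follow one shape: a scenario pins the environment variable under test (DD_APPSEC_ENABLED, DD_APPSEC_RULES, DD_APPSEC_WAF_TIMEOUT, the obfuscation regexps), the setup fires a request carrying a known attack marker, and the test asserts either a WAF attack or its absence. A compressed sketch of the two toggle outcomes, assuming the suite's weblog/interfaces helpers; check_appsec_toggle itself is hypothetical, and the env var is set by the scenario, not here:

    from utils import weblog, interfaces

    def check_appsec_toggle(enabled):
        r = weblog.get("/waf/", headers={"User-Agent": "Arachni/v1"})
        if enabled:
            interfaces.library.assert_waf_attack(r)       # DD_APPSEC_ENABLED=true
        else:
            assert r.status_code == 200                   # request still served
            interfaces.library.assert_no_appsec_event(r)  # DD_APPSEC_ENABLED=false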
-from utils import weblog, context, interfaces, bug, scenarios, features +from utils import weblog, context, interfaces, scenarios, features # get the default log output diff --git a/tests/appsec/test_event_tracking.py b/tests/appsec/test_event_tracking.py index cdfca1c643..e902e6d4ef 100644 --- a/tests/appsec/test_event_tracking.py +++ b/tests/appsec/test_event_tracking.py @@ -69,7 +69,7 @@ def test_user_login_success_header_collection(self): def validate_user_login_success_header_collection(span): if span.get("parent_id") not in (0, None): - return + return None for header in HEADERS: assert f"http.request.headers.{header.lower()}" in span["meta"], f"Can't find {header} in span's meta" @@ -123,7 +123,7 @@ def test_user_login_failure_header_collection(self): def validate_user_login_failure_header_collection(span): if span.get("parent_id") not in (0, None): - return + return None for header in HEADERS: assert f"http.request.headers.{header.lower()}" in span["meta"], f"Can't find {header} in span's meta" diff --git a/tests/appsec/test_ip_blocking_full_denylist.py b/tests/appsec/test_ip_blocking_full_denylist.py index bfd142e062..af80fa959d 100644 --- a/tests/appsec/test_ip_blocking_full_denylist.py +++ b/tests/appsec/test_ip_blocking_full_denylist.py @@ -2,7 +2,6 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. - from utils import weblog, context, interfaces, rfc, bug, scenarios, missing_feature, features from .utils import BaseFullDenyListTest @@ -27,8 +26,9 @@ def setup_blocked_ips(self): @missing_feature(weblog_variant="spring-boot" and context.library < "java@0.111.0") @bug(context.library >= "java@1.22.0" and context.library < "java@1.35.0", reason="APMRP-360") + @bug(context.library < "ruby@2.11.0-dev", reason="APMRP-56691") def test_blocked_ips(self): - """test blocked ips are enforced""" + """Test blocked ips are enforced""" self.assert_protocol_is_respected() diff --git a/tests/appsec/test_rate_limiter.py b/tests/appsec/test_rate_limiter.py index 7df53ac6a8..f3ce924df7 100644 --- a/tests/appsec/test_rate_limiter.py +++ b/tests/appsec/test_rate_limiter.py @@ -5,7 +5,7 @@ import datetime import time -from utils import weblog, context, interfaces, rfc, bug, scenarios, flaky, features +from utils import weblog, context, interfaces, rfc, bug, scenarios, features from utils.tools import logger @@ -20,8 +20,7 @@ class Test_Main: # as sampling mechnism is very different across agent, it won't be an easy task def setup_main(self): - """ - Make 5 requests per second, for 10 seconds. + """Make 5 requests per second, for 10 seconds. 
The test may be flaky if all requests takes more than 200ms, but it's very unlikely """ @@ -42,7 +41,7 @@ def setup_main(self): context.library > "nodejs@3.14.1" and context.library < "nodejs@4.8.0", reason="APMRP-360" ) # _sampling_priority_v1 is missing def test_main(self): - """send requests for 10 seconds, check that only 10-ish traces are sent, as rate limiter is set to 1/s""" + """Send requests for 10 seconds, check that only 10-ish traces are sent, as rate limiter is set to 1/s""" MANUAL_KEEP = 2 trace_count = 0 diff --git a/tests/appsec/test_reports.py b/tests/appsec/test_reports.py index d82110ded2..2a9805b421 100644 --- a/tests/appsec/test_reports.py +++ b/tests/appsec/test_reports.py @@ -171,9 +171,9 @@ def setup_basic(self): self.r = weblog.get("/waf/", headers={"User-Agent": "Arachni/v1"}) def test_basic(self): - """attack timestamp is given by start property of span""" + """Attack timestamp is given by start property of span""" spans = [span for _, _, span, _ in interfaces.library.get_appsec_events(request=self.r)] assert spans, "No AppSec events found" for span in spans: assert "start" in span, "span should contain start property" - assert isinstance(span["start"], int), f"start property should an int, not {repr(span['start'])}" + assert isinstance(span["start"], int), f"start property should an int, not {span['start']!r}" diff --git a/tests/appsec/test_request_blocking.py b/tests/appsec/test_request_blocking.py index 10a349c20f..651f07f25e 100644 --- a/tests/appsec/test_request_blocking.py +++ b/tests/appsec/test_request_blocking.py @@ -32,7 +32,7 @@ def setup_request_blocking(self): self.blocked_requests2 = weblog.get(params={"random-key": "/netsparker-"}) def test_request_blocking(self): - """test requests are blocked by rules in blocking mode""" + """Test requests are blocked by rules in blocking mode""" assert self.config_state[remote_config.RC_STATE] == remote_config.ApplyState.ACKNOWLEDGED diff --git a/tests/appsec/test_runtime_activation.py b/tests/appsec/test_runtime_activation.py index f3de3c0bef..6bd3a5fe5f 100644 --- a/tests/appsec/test_runtime_activation.py +++ b/tests/appsec/test_runtime_activation.py @@ -5,7 +5,6 @@ from utils import bug from utils import context from utils import features -from utils import flaky from utils import interfaces from utils import remote_config as rc from utils import scenarios diff --git a/tests/appsec/test_suspicious_attacker_blocking.py b/tests/appsec/test_suspicious_attacker_blocking.py index 4f26795086..25b1fa22d8 100644 --- a/tests/appsec/test_suspicious_attacker_blocking.py +++ b/tests/appsec/test_suspicious_attacker_blocking.py @@ -3,9 +3,7 @@ # Copyright 2021 Datadog, Inc. 
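The rate-limiter test above compresses to: fire roughly five requests per second for ten seconds, then count root spans kept with the manual-keep sampling priority; with the limiter at one trace per second, "10-ish" should survive. A sketch of that counting loop, assuming the priority is reported in each span's metrics map (the endpoint and pacing here are illustrative):

    import time
    from utils import weblog, interfaces

    MANUAL_KEEP = 2  # sampling priority of traces the rate limiter kept

    def count_kept_traces():
        requests = []
        deadline = time.monotonic() + 10
        while time.monotonic() < deadline:
            requests.append(weblog.get("/"))
            time.sleep(0.2)                  # ~5 requests per second
        kept = 0
        for r in requests:
            for _, _, span in interfaces.library.get_spans(request=r):
                if span.get("metrics", {}).get("_sampling_priority_v1") == MANUAL_KEEP:
                    kept += 1
        return kept                          # expect "10-ish" with a 1 trace/s limiter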
-from utils import context from utils import features -from utils import flaky from utils import interfaces from utils import remote_config as rc from utils import scenarios diff --git a/tests/appsec/test_traces.py b/tests/appsec/test_traces.py index e51d7b6791..2988603ae0 100644 --- a/tests/appsec/test_traces.py +++ b/tests/appsec/test_traces.py @@ -27,14 +27,13 @@ def setup_appsec_event_span_tags(self): self.r = weblog.get("/waf/", headers={"User-Agent": "Arachni/v1"}) def test_appsec_event_span_tags(self): - """ - Spans with AppSec events should have the general AppSec span tags, along with the appsec.event and + """Spans with AppSec events should have the general AppSec span tags, along with the appsec.event and _sampling_priority_v1 tags """ def validate_appsec_event_span_tags(span): if span.get("parent_id") not in (0, None): # do nothing if not root span - return + return None if "appsec.event" not in span["meta"]: raise Exception("Can't find appsec.event in span's meta") @@ -88,8 +87,7 @@ def setup_header_collection(self): @irrelevant(context.library not in ["golang", "nodejs", "java", "dotnet"], reason="test") @irrelevant(context.scenario is scenarios.external_processing, reason="Irrelevant tag set for golang") def test_header_collection(self): - """ - AppSec should collect some headers for http.request and http.response and store them in span tags. + """AppSec should collect some headers for http.request and http.response and store them in span tags. Note that this test checks for collection, not data. """ spans = [span for _, _, span in interfaces.library.get_spans(request=self.r)] @@ -242,8 +240,7 @@ def setup_appsec_obfuscator_cookies_with_custom_rules(self): @scenarios.appsec_custom_rules def test_appsec_obfuscator_cookies_with_custom_rules(self): - """ - Specific obfuscation test for the cookies which often contain sensitive data and are + """Specific obfuscation test for the cookies which often contain sensitive data and are expected to be properly obfuscated on sensitive cookies only. """ # Validate that the AppSec events do not contain the following secret value. @@ -302,9 +299,7 @@ def setup_collect_default_request_headers(self): self.r = weblog.get("/headers", headers={header: "myHeaderValue" for header in self.HEADERS}) def test_collect_default_request_headers(self): - """ - Collect User agent and other headers and other security info when appsec is enabled. - """ + """Collect User agent and other headers and other security info when appsec is enabled.""" def assertHeaderInSpanMeta(span, header): if header not in span["meta"]: @@ -340,9 +335,7 @@ def setup_external_wafs_header_collection(self): ) def test_external_wafs_header_collection(self): - """ - Collect external wafs request identifier and other security info when appsec is enabled. 
- """ + """Collect external wafs request identifier and other security info when appsec is enabled.""" def assertHeaderInSpanMeta(span, header): if header not in span["meta"]: diff --git a/tests/appsec/test_user_blocking_full_denylist.py b/tests/appsec/test_user_blocking_full_denylist.py index e97f160f86..e752a99aa7 100644 --- a/tests/appsec/test_user_blocking_full_denylist.py +++ b/tests/appsec/test_user_blocking_full_denylist.py @@ -1,4 +1,4 @@ -from utils import context, interfaces, scenarios, weblog, bug, features, missing_feature +from utils import context, interfaces, scenarios, weblog, bug, features from .utils import BaseFullDenyListTest @@ -33,7 +33,8 @@ def setup_blocking_test(self): @bug(context.library < "ruby@1.12.1", reason="APMRP-360") @bug(context.library >= "java@1.22.0" and context.library < "java@1.35.0", reason="APMRP-360") - @bug(library="java", reason="APPSEC-56006") + @bug(library="java", weblog_variant="spring-boot-payara", reason="APPSEC-56006") + @bug(context.library < "ruby@2.11.0-dev", reason="APMRP-56691") def test_blocking_test(self): """Test with a denylisted user""" diff --git a/tests/appsec/waf/test_addresses.py b/tests/appsec/waf/test_addresses.py index 537b90f1a4..508e78320a 100644 --- a/tests/appsec/waf/test_addresses.py +++ b/tests/appsec/waf/test_addresses.py @@ -102,7 +102,7 @@ def setup_specific_key2(self): @irrelevant(library="php", reason="PHP normalizes into dashes; additionally, matching on keys is not supported") @missing_feature(weblog_variant="spring-boot-3-native", reason="GraalVM. Tracing support only") def test_specific_key2(self): - """attacks on specific header X_Filename, and report it""" + """Attacks on specific header X_Filename, and report it""" try: interfaces.library.assert_waf_attack( self.r_sk_4, pattern="routing.yml", address="server.request.headers.no_cookies", key_path=["x_filename"] @@ -338,8 +338,8 @@ def test_basic(self): for r in self.requests: try: interfaces.library.assert_waf_attack(r, address="grpc.server.request.message") - except: - raise ValueError(f"Basic attack #{self.requests.index(r)} not detected") + except Exception as e: + raise ValueError(f"Basic attack #{self.requests.index(r)} not detected") from e @rfc("https://datadoghq.atlassian.net/wiki/spaces/APS/pages/2278064284/gRPC+Protocol+Support") @@ -450,7 +450,7 @@ class Test_GrpcServerMethod: def validate_span(self, span, appsec_data): tag = "rpc.grpc.full_method" - if not tag in span["meta"]: + if tag not in span["meta"]: logger.info(f"Can't find '{tag}' in span's meta") return False diff --git a/tests/appsec/waf/test_blocking.py b/tests/appsec/waf/test_blocking.py index 2a137d8794..b475987f87 100644 --- a/tests/appsec/waf/test_blocking.py +++ b/tests/appsec/waf/test_blocking.py @@ -76,10 +76,10 @@ def test_blocking_appsec_blocked_tag(self): def validate_appsec_blocked(span): if span.get("type") != "web": - return + return None if span.get("parent_id") not in (0, None): # do nothing if not root span - return + return None if "appsec.blocked" not in span["meta"]: raise ValueError("Can't find appsec.blocked in span's tags") @@ -201,7 +201,7 @@ def test_html_template_v2(self): @features.appsec_blocking_action class Test_Blocking_strip_response_headers: def setup_strip_response_headers(self): - self.r_srh = weblog.get(f"/tag_value/anything/200?x-secret-header=123&content-language=krypton") + self.r_srh = weblog.get("/tag_value/anything/200?x-secret-header=123&content-language=krypton") def test_strip_response_headers(self): """Test if headers are stripped from 
the blocking response""" diff --git a/tests/appsec/waf/test_reports.py b/tests/appsec/waf/test_reports.py index 22d4cc5574..03167ddc1e 100644 --- a/tests/appsec/waf/test_reports.py +++ b/tests/appsec/waf/test_reports.py @@ -51,8 +51,7 @@ def setup_waf_monitoring_once(self): self.r_once = weblog.get("/waf/", headers={"User-Agent": "Arachni/v1"}) def test_waf_monitoring_once(self): - """ - Some WAF monitoring span tags and metrics are expected to be sent at + """Some WAF monitoring span tags and metrics are expected to be sent at least once in a request span at some point """ @@ -67,8 +66,7 @@ def test_waf_monitoring_once(self): ] def validate_rules_monitoring_span_tags(span): - """ - Validate the mandatory rules monitoring span tags are added to a request span at some point such as the + """Validate the mandatory rules monitoring span tags are added to a request span at some point such as the first request or first attack. """ @@ -109,10 +107,10 @@ def validate_rules_monitoring_span_tags(span): raise Exception("if there are rule errors, there should be rule error details too") try: json.loads(meta[expected_rules_errors_meta_tag]) - except ValueError: + except ValueError as e: raise Exception( f"rule error details should be valid JSON but was `{meta[expected_rules_errors_meta_tag]}`" - ) + ) from e return True @@ -156,8 +154,7 @@ def setup_waf_monitoring_errors(self): @scenarios.appsec_rules_monitoring_with_errors def test_waf_monitoring_errors(self): - """ - Some WAF monitoring span tags and metrics are expected to be sent at + """Some WAF monitoring span tags and metrics are expected to be sent at least once in a request span at some point """ @@ -178,8 +175,7 @@ def test_waf_monitoring_errors(self): expected_error_details = {"missing key 'name'": ["missing-name"], "missing key 'tags'": ["missing-tags"]} def validate_rules_monitoring_span_tags(span): - """ - Validate the mandatory rules monitoring span tags are added to a request span at some point such as the + """Validate the mandatory rules monitoring span tags are added to a request span at some point such as the first request or first attack. """ diff --git a/tests/appsec/waf/test_telemetry.py b/tests/appsec/waf/test_telemetry.py index 1fe62be390..97f93cf05b 100644 --- a/tests/appsec/waf/test_telemetry.py +++ b/tests/appsec/waf/test_telemetry.py @@ -1,4 +1,4 @@ -from utils import bug, context, interfaces, irrelevant, features, flaky, missing_feature, rfc, scenarios, weblog +from utils import bug, context, interfaces, features, rfc, scenarios, weblog from utils.tools import logger TELEMETRY_REQUEST_TYPE_GENERATE_METRICS = "generate-metrics" @@ -6,25 +6,25 @@ def _setup(self): - """ - Common setup for all tests in this module. They all depend on the same set + """Common setup for all tests in this module. They all depend on the same set of requests, which must be run only once. """ # Run only once, even across multiple class instances. 
if hasattr(Test_TelemetryMetrics, "__common_setup_done"): return - r_plain = weblog.get("/", headers={"x-forwarded-for": "80.80.80.80"}) - r_triggered = weblog.get("/", headers={"x-forwarded-for": "80.80.80.80", "user-agent": "Arachni/v1"}) - r_blocked = weblog.get( + weblog.get("/", headers={"x-forwarded-for": "80.80.80.80"}) + weblog.get("/", headers={"x-forwarded-for": "80.80.80.80", "user-agent": "Arachni/v1"}) + weblog.get( "/", headers={"x-forwarded-for": "80.80.80.80", "user-agent": "dd-test-scanner-log-block"}, - # XXX: hack to prevent rid inhibiting the dd-test-scanner-log-block rule + # Hack to prevent rid inhibiting the dd-test-scanner-log-block rule rid_in_user_agent=False, ) Test_TelemetryMetrics.__common_setup_done = True @rfc("https://docs.google.com/document/d/1qBDsS_ZKeov226CPx2DneolxaARd66hUJJ5Lh9wjhlE") +@rfc("https://docs.google.com/document/d/1D4hkC0jwwUyeo0hEQgyKP54kM1LZU98GL8MaP60tQrA") @scenarios.appsec_waf_telemetry @features.waf_telemetry class Test_TelemetryMetrics: @@ -35,7 +35,9 @@ class Test_TelemetryMetrics: @bug(context.library < "java@1.13.0", reason="APMRP-360") def test_headers_are_correct(self): """Tests that all telemetry requests have correct headers.""" - for data in interfaces.library.get_telemetry_data(flatten_message_batches=False): + datas = list(interfaces.library.get_telemetry_data(flatten_message_batches=False)) + assert len(datas) > 0, "No telemetry received" + for data in datas: request_type = data["request"]["content"].get("request_type") _validate_headers(data["request"]["headers"], request_type) @@ -53,6 +55,7 @@ def test_metric_waf_init(self): "event_rules_version", "version", "lib_language", + "success", } series = self._find_series(TELEMETRY_REQUEST_TYPE_GENERATE_METRICS, "appsec", expected_metric_name) # TODO(Python). Gunicorn creates 2 process (main gunicorn process + X child workers). It generates two init diff --git a/tests/auto_inject/test_auto_inject_chaos.py b/tests/auto_inject/test_auto_inject_chaos.py index 6cfa72505f..7c5abb54e5 100644 --- a/tests/auto_inject/test_auto_inject_chaos.py +++ b/tests/auto_inject/test_auto_inject_chaos.py @@ -1,9 +1,8 @@ import requests -from utils import scenarios, features +from utils import scenarios, features, context from utils.tools import logger from utils.onboarding.weblog_interface import warmup_weblog from utils.onboarding.wait_for_tcp_port import wait_for_port -from utils import scenarios, context, features import tests.auto_inject.utils as base from utils.virtual_machine.utils import parametrize_virtual_machines @@ -13,7 +12,8 @@ def _test_removing_things(self, virtual_machine, evil_command): """Test break the installation and restore it. After breaking the installation, the app should be still working (but no sending traces to the backend). After breaking the installation, we can restart the app - After restores the installation, the app should be working and sending traces to the backend.""" + After restores the installation, the app should be working and sending traces to the backend. 
+ """ vm_ip = virtual_machine.get_ip() vm_port = virtual_machine.deffault_open_port diff --git a/tests/auto_inject/test_auto_inject_guardrail.py b/tests/auto_inject/test_auto_inject_guardrail.py index 4ba95c3ac3..e428fd2589 100644 --- a/tests/auto_inject/test_auto_inject_guardrail.py +++ b/tests/auto_inject/test_auto_inject_guardrail.py @@ -2,7 +2,6 @@ from utils.tools import logger from utils.onboarding.weblog_interface import make_get_request, warmup_weblog from utils.onboarding.wait_for_tcp_port import wait_for_port -from utils import scenarios, features from utils.virtual_machine.utils import parametrize_virtual_machines diff --git a/tests/auto_inject/test_auto_inject_install.py b/tests/auto_inject/test_auto_inject_install.py index d1d39ac70f..11a42028f6 100644 --- a/tests/auto_inject/test_auto_inject_install.py +++ b/tests/auto_inject/test_auto_inject_install.py @@ -1,8 +1,6 @@ -import re from utils import scenarios, features, flaky, irrelevant, context from utils.tools import logger from utils.onboarding.weblog_interface import warmup_weblog, get_child_pids, get_zombies, fork_and_crash -from utils import scenarios, features import tests.auto_inject.utils as base from utils.virtual_machine.utils import parametrize_virtual_machines @@ -38,6 +36,7 @@ class TestSimpleInstallerAutoInjectManualProfiling(base.AutoInjectBaseTest): {"vm_cpu": "arm64", "weblog_variant": "test-app-dotnet-container", "reason": "PROF-10783"}, {"vm_name": "Ubuntu_24_amd64", "weblog-variant": "test-app-nodejs", "reason": "PROF-11264"}, {"vm_name": "Ubuntu_24_arm64", "weblog-variant": "test-app-nodejs", "reason": "PROF-11264"}, + {"weblog_variant": "test-app-python-alpine", "reason": "PROF-11296"}, ] ) def test_profiling(self, virtual_machine): @@ -76,7 +75,10 @@ def test_install(self, virtual_machine): @scenarios.container_auto_injection_install_script_profiling class TestContainerAutoInjectInstallScriptProfiling(base.AutoInjectBaseTest): @parametrize_virtual_machines( - bugs=[{"vm_cpu": "arm64", "weblog_variant": "test-app-dotnet-container", "reason": "PROF-10783"}] + bugs=[ + {"vm_cpu": "arm64", "weblog_variant": "test-app-dotnet-container", "reason": "PROF-10783"}, + {"weblog_variant": "test-app-python-alpine", "reason": "PROF-11296"}, + ] ) def test_profiling(self, virtual_machine): self._test_install(virtual_machine, profile=True) @@ -119,7 +121,7 @@ def test_crash_no_zombie(self, virtual_machine): try: crash_result = fork_and_crash(virtual_machine) logger.info("fork_and_crash: " + crash_result) - except Exception as e: + except Exception: process_tree = self.execute_command(virtual_machine, "ps aux --forest") logger.warning("Failure process tree: " + process_tree) raise @@ -148,8 +150,8 @@ def test_crash_no_zombie(self, virtual_machine): @scenarios.installer_auto_injection class TestInstallerAutoInjectManual(base.AutoInjectBaseTest): # Note: uninstallation of a single installer package is not available today - #  on the installer. As we can't only uninstall the injector, we are skipping - #  the uninstall test today + # on the installer. 
As we can't only uninstall the injector, we are skipping + # the uninstall test today @parametrize_virtual_machines( bugs=[ {"vm_name": "AlmaLinux_8_arm64", "weblog_variant": "test-app-python-alpine", "reason": "APMON-1576"}, diff --git a/tests/auto_inject/test_blocklist_auto_inject.py b/tests/auto_inject/test_blocklist_auto_inject.py index eecfd12500..66e22a427b 100644 --- a/tests/auto_inject/test_blocklist_auto_inject.py +++ b/tests/auto_inject/test_blocklist_auto_inject.py @@ -130,6 +130,6 @@ def test_builtIn_instrument_args(self, virtual_machine): ssh_client = virtual_machine.ssh_config.get_ssh_connection() for command in self.buildIn_args_commands_injected[language]: local_log_file = self._execute_remote_command(ssh_client, command) - assert False == command_injection_skipped( - command, local_log_file + assert ( + command_injection_skipped(command, local_log_file) is False ), f"The command {command} was not instrumented, but it should be instrumented!" diff --git a/tests/auto_inject/utils.py b/tests/auto_inject/utils.py index c66ee153f6..83ec2497be 100644 --- a/tests/auto_inject/utils.py +++ b/tests/auto_inject/utils.py @@ -1,9 +1,3 @@ -import json -import os -import requests -import time -import pytest -import paramiko from utils.tools import logger from utils.onboarding.weblog_interface import make_get_request, warmup_weblog, make_internal_get_request from utils.onboarding.backend_interface import wait_backend_trace_id @@ -17,7 +11,8 @@ class AutoInjectBaseTest: def _test_install(self, virtual_machine, profile: bool = False): """We can easily install agent and lib injection software from agent installation script. Given a sample application we can enable tracing using local environment variables. After starting application we can see application HTTP requests traces in the backend. - Using the agent installation script we can install different versions of the software (release or beta) in different OS.""" + Using the agent installation script we can install different versions of the software (release or beta) in different OS. + """ vm_ip = virtual_machine.get_ip() vm_port = virtual_machine.deffault_open_port vm_context_url = f"http://{vm_ip}:{vm_port}{virtual_machine.get_deployed_weblog().app_context_url}" @@ -27,7 +22,7 @@ def _test_install(self, virtual_machine, profile: bool = False): ) if virtual_machine.krunvm_config is not None and virtual_machine.krunvm_config.stdin is not None: logger.info( - f"We are testing on krunvm. The request to the weblog will be done using the stdin (inside the microvm)" + "We are testing on krunvm. The request to the weblog will be done using the stdin (inside the microvm)" ) request_uuid = make_internal_get_request(virtual_machine.krunvm_config.stdin, vm_port) else: @@ -82,7 +77,8 @@ def _test_uninstall_commands( """We can unistall the auto injection software. We can start the app again The weblog app should work but no sending traces to the backend. We can reinstall the auto inject software. The weblog app should be instrumented - and reporting traces to the backend.""" + and reporting traces to the backend. 
+ """ logger.info(f"Launching _test_uninstall for : [{virtual_machine.name}]") vm_ip = virtual_machine.get_ip() @@ -111,7 +107,7 @@ def _test_uninstall_commands( logger.info(f"Making a request to weblog [http://{vm_ip}:{vm_port}{app['url']}]") request_uuids.append(make_get_request(f"http://{vm_ip}:{vm_port}{app['url']}")) else: - logger.info(f"Making a request to weblog [weblog_url]") + logger.info(f"Making a request to weblog {weblog_url}") request_uuids.append(make_get_request(weblog_url)) try: @@ -151,8 +147,8 @@ def _test_uninstall(self, virtual_machine): start_weblog_command = virtual_machine._vm_provision.weblog_installation.remote_command else: # Container stop_weblog_command = "sudo -E docker-compose -f docker-compose.yml down" - #   On older Docker versions, the network recreation can hang. The solution is to restart Docker. - #   https://github.com/docker-archive/classicswarm/issues/1931 + # On older Docker versions, the network recreation can hang. The solution is to restart Docker. + # https://github.com/docker-archive/classicswarm/issues/1931 start_weblog_command = "sudo systemctl restart docker && sudo -E docker-compose -f docker-compose.yml up --wait --wait-timeout 120" install_command = "sudo datadog-installer apm instrument" diff --git a/tests/debugger/approvals/exception_replay_inner_dotnet_snapshots_expected.json b/tests/debugger/approvals/exception_replay_inner_dotnet_snapshots_expected.json index f7ebada16d..1b6149eb8d 100644 --- a/tests/debugger/approvals/exception_replay_inner_dotnet_snapshots_expected.json +++ b/tests/debugger/approvals/exception_replay_inner_dotnet_snapshots_expected.json @@ -129,7 +129,7 @@ { "function": "weblog.ExceptionReplayController.ExceptionReplayInner", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 52 + "lineNumber": 60 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -143,6 +143,9 @@ "": "" } ], + "exceptionHash": "304a8161-128e-39b5-9e50-737cd9af638a", + "exceptionId": "", + "frameIndex": "1", "id": "", "timestamp": "", "duration": "", diff --git a/tests/debugger/approvals/exception_replay_inner_dotnet_spans_expected.json b/tests/debugger/approvals/exception_replay_inner_dotnet_spans_expected.json index d28e85906d..b90f20e365 100644 --- a/tests/debugger/approvals/exception_replay_inner_dotnet_spans_expected.json +++ b/tests/debugger/approvals/exception_replay_inner_dotnet_spans_expected.json @@ -4,9 +4,11 @@ "error": 1, "meta": { "_dd.appsec.event_rules.version": "", - "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", - "_dd.debug.error.0.frame_data.function": "ExceptionReplayInner", - "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.0.frame_data.name": "ExceptionReplayInner", + "_dd.debug.error.0.no_capture_reason": "The method ExceptionReplayInner could not be found.", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "ExceptionReplayInner", + "_dd.debug.error.1.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", "_dd.di._eh": "1519717919", diff --git a/tests/debugger/approvals/exception_replay_multiframe_dotnet_snapshots_expected.json b/tests/debugger/approvals/exception_replay_multiframe_dotnet_snapshots_expected.json index b98c59d389..2adc792b3d 100644 --- a/tests/debugger/approvals/exception_replay_multiframe_dotnet_snapshots_expected.json +++ b/tests/debugger/approvals/exception_replay_multiframe_dotnet_snapshots_expected.json @@ 
-85,12 +85,12 @@ { "function": "weblog.ExceptionReplayController.DeepFunctionA", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 91 + "lineNumber": 99 }, { "function": "weblog.ExceptionReplayController.ExceptionReplayMultiframe", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 98 + "lineNumber": 106 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -104,6 +104,9 @@ "": "" } ], + "exceptionHash": "c8afb7d4-ceb5-3a17-8748-6864a6eb7c39", + "exceptionId": "", + "frameIndex": "2", "id": "", "timestamp": "", "duration": "", @@ -191,17 +194,17 @@ { "function": "weblog.ExceptionReplayController.DeepFunctionB", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 86 + "lineNumber": 94 }, { "function": "weblog.ExceptionReplayController.DeepFunctionA", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 91 + "lineNumber": 99 }, { "function": "weblog.ExceptionReplayController.ExceptionReplayMultiframe", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 98 + "lineNumber": 106 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -215,6 +218,9 @@ "": "" } ], + "exceptionHash": "c8afb7d4-ceb5-3a17-8748-6864a6eb7c39", + "exceptionId": "", + "frameIndex": "1", "id": "", "timestamp": "", "duration": "", @@ -298,22 +304,22 @@ { "function": "weblog.ExceptionReplayController.DeepFunctionC", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 80 + "lineNumber": 88 }, { "function": "weblog.ExceptionReplayController.DeepFunctionB", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 86 + "lineNumber": 94 }, { "function": "weblog.ExceptionReplayController.DeepFunctionA", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 91 + "lineNumber": 99 }, { "function": "weblog.ExceptionReplayController.ExceptionReplayMultiframe", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 98 + "lineNumber": 106 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -327,6 +333,9 @@ "": "" } ], + "exceptionHash": "c8afb7d4-ceb5-3a17-8748-6864a6eb7c39", + "exceptionId": "", + "frameIndex": "0", "id": "", "timestamp": "", "duration": "", @@ -418,7 +427,7 @@ { "function": "weblog.ExceptionReplayController.ExceptionReplayMultiframe", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 98 + "lineNumber": 106 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -432,6 +441,9 @@ "": "" } ], + "exceptionHash": "c8afb7d4-ceb5-3a17-8748-6864a6eb7c39", + "exceptionId": "", + "frameIndex": "3", "id": "", "timestamp": "", "duration": "", diff --git a/tests/debugger/approvals/exception_replay_recursion_20_dotnet_snapshots_expected.json b/tests/debugger/approvals/exception_replay_recursion_20_dotnet_snapshots_expected.json index f0fcf49c87..1de157b51f 100644 --- a/tests/debugger/approvals/exception_replay_recursion_20_dotnet_snapshots_expected.json +++ b/tests/debugger/approvals/exception_replay_recursion_20_dotnet_snapshots_expected.json @@ -97,7 +97,1157 @@ "lineNumber": 0 }, { - "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + 
"value": "recursion exception depth 20" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "depth": { + "type": "Int32", + "value": "20" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursion", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, + { + "": "" + } + ], + "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0", + "exceptionId": "", + "frameIndex": "21", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 20", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 20" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "originalDepth": { + "type": "Int32", + "value": "20" + }, + "currentDepth": { + "type": "Int32", + "value": "9" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + 
"method": "exceptionReplayRecursionHelper", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, + { + "": "" + } + ], + "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0", + "exceptionId": "", + "frameIndex": "9", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 20", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": 
"weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 20" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "originalDepth": { + "type": "Int32", + "value": "20" + }, + "currentDepth": { + "type": "Int32", + "value": "8" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursionHelper", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": 
"/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, + { + "": "" + } + ], + "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0", + "exceptionId": "", + "frameIndex": "8", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 20", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 20" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "originalDepth": { + "type": "Int32", + "value": "20" + }, + "currentDepth": { + "type": "Int32", + "value": "7" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursionHelper", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": 
"/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, + { + "": "" + } + ], + "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0", + "exceptionId": "", + "frameIndex": "7", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 20", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 20" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + 
+          }
+        },
+        "arguments": {
+          "originalDepth": {
+            "type": "Int32",
+            "value": "20"
+          },
+          "currentDepth": {
+            "type": "Int32",
+            "value": "6"
+          },
+          "this": {
+            "type": "ExceptionReplayController",
+            "value": "ExceptionReplayController"
+          }
+        }
+      }
+    },
+    "probe": {
+      "id": "",
+      "version": 1,
+      "location": {
+        "method": "exceptionReplayRecursionHelper",
+        "type": "weblog.ExceptionReplayController"
+      }
+    },
+    "stack": [
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursion",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 28
+      },
+      {
+        "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+",
+        "lineNumber": 0
+      },
+      {
+        "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke",
+        "lineNumber": 0
+      },
+      {
+        "": ""
+      }
+    ],
+    "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0",
+    "exceptionId": "",
+    "frameIndex": "6",
+    "id": "",
+    "timestamp": "",
+    "duration": "",
+    "language": "dotnet"
+  },
+  {
+    "captures": {
+      "return": {
+        "staticFields": {
+          "Empty": {
+            "type": "EmptyResult",
+            "value": "EmptyResult"
+          }
+        },
"throwable": { + "message": "recursion exception depth 20", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 20" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "originalDepth": { + "type": "Int32", + "value": "20" + }, + "currentDepth": { + "type": "Int32", + "value": "5" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursionHelper", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + 
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursion",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 28
+      },
+      {
+        "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+",
+        "lineNumber": 0
+      },
+      {
+        "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke",
+        "lineNumber": 0
+      },
+      {
+        "": ""
+      }
+    ],
+    "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0",
+    "exceptionId": "",
+    "frameIndex": "5",
+    "id": "",
+    "timestamp": "",
+    "duration": "",
+    "language": "dotnet"
+  },
+  {
+    "captures": {
+      "return": {
+        "staticFields": {
+          "Empty": {
+            "type": "EmptyResult",
+            "value": "EmptyResult"
+          }
+        },
+        "throwable": {
+          "message": "recursion exception depth 20",
+          "type": "System.Exception",
+          "stacktrace": [
+            {
+              "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+              "lineNumber": 0
+            },
+            {
+              "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+              "lineNumber": 0
+            },
+            {
+              "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+              "lineNumber": 0
+            },
+            {
+              "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+              "lineNumber": 0
+            },
+            {
+              "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+              "lineNumber": 0
+            },
+            {
+              "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
              "lineNumber": 0
            },
            {
@@ -144,7 +1294,7 @@
          },
          "currentDepth": {
            "type": "Int32",
-           "value": "20"
+           "value": "4"
          },
          "this": {
            "type": "ExceptionReplayController",
@@ -162,10 +1312,111 @@
      }
    },
    "stack": [
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
+        "lineNumber": 46
+      },
+      {
+        "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper",
+        "fileName": "/app/ExceptionReplayController.cs",
"/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, { "": "" } ], + "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0", + "exceptionId": "", + "frameIndex": "4", "id": "", "timestamp": "", "duration": "", @@ -269,92 +1520,97 @@ { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": 
"/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -368,6 +1624,9 @@ "": "" } ], + "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0", + "exceptionId": "", + "frameIndex": "3", "id": "", "timestamp": "", "duration": "", @@ -467,97 +1726,102 @@ { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": 
"/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -571,6 +1835,9 @@ "": "" } ], + "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0", + "exceptionId": "", + "frameIndex": "2", "id": "", "timestamp": "", "duration": "", @@ -666,102 +1933,107 @@ { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": 
"/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -775,6 +2047,9 @@ "": "" } ], + "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0", + "exceptionId": "", + "frameIndex": "1", "id": "", "timestamp": "", "duration": "", @@ -866,107 +2141,112 @@ { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": 
"/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -980,6 +2260,9 @@ "": "" } ], + "exceptionHash": "e1ad6a34-487f-3627-841e-edaf5c8eedf0", + "exceptionId": "", + "frameIndex": "0", "id": "", "timestamp": "", "duration": "", diff --git a/tests/debugger/approvals/exception_replay_recursion_20_dotnet_spans_expected.json b/tests/debugger/approvals/exception_replay_recursion_20_dotnet_spans_expected.json index fb528b984f..2bf29cbf9f 100644 --- a/tests/debugger/approvals/exception_replay_recursion_20_dotnet_spans_expected.json +++ 
+++ b/tests/debugger/approvals/exception_replay_recursion_20_dotnet_spans_expected.json
@@ -13,15 +13,33 @@
      "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController",
      "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper",
      "_dd.debug.error.2.snapshot_id": "",
-      "_dd.debug.error.20.frame_data.class_name": "ExceptionReplayController",
-      "_dd.debug.error.20.frame_data.function": "exceptionReplayRecursionHelper",
-      "_dd.debug.error.20.snapshot_id": "",
+      "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion",
+      "_dd.debug.error.21.snapshot_id": "",
      "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController",
      "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper",
      "_dd.debug.error.3.snapshot_id": "",
+      "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper",
+      "_dd.debug.error.4.snapshot_id": "",
+      "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper",
+      "_dd.debug.error.5.snapshot_id": "",
+      "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper",
+      "_dd.debug.error.6.snapshot_id": "",
+      "_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper",
+      "_dd.debug.error.7.snapshot_id": "",
+      "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper",
+      "_dd.debug.error.8.snapshot_id": "",
+      "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper",
+      "_dd.debug.error.9.snapshot_id": "",
      "_dd.debug.error.exception_hash": "",
      "_dd.debug.error.exception_id": "",
-      "_dd.di._eh": "1010767962",
+      "_dd.di._eh": "-1900001729",
      "_dd.di._er": "Eligible",
      "_dd.iast.enabled": "1",
      "_dd.runtime_family": "dotnet",
@@ -72,15 +90,33 @@
      "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController",
      "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper",
      "_dd.debug.error.2.snapshot_id": "",
-      "_dd.debug.error.20.frame_data.class_name": "ExceptionReplayController",
-      "_dd.debug.error.20.frame_data.function": "exceptionReplayRecursionHelper",
-      "_dd.debug.error.20.snapshot_id": "",
+      "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion",
+      "_dd.debug.error.21.snapshot_id": "",
      "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController",
      "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper",
      "_dd.debug.error.3.snapshot_id": "",
+      "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper",
+      "_dd.debug.error.4.snapshot_id": "",
+      "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper",
+      "_dd.debug.error.5.snapshot_id": "",
+      "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController",
+      "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper",
+      "_dd.debug.error.6.snapshot_id": "",
"_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.7.snapshot_id": "", + "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.8.snapshot_id": "", + "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.9.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "1010767962", + "_dd.di._eh": "-1900001729", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", @@ -131,15 +167,33 @@ "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.2.snapshot_id": "", - "_dd.debug.error.20.frame_data.class_name": "ExceptionReplayController", - "_dd.debug.error.20.frame_data.function": "exceptionReplayRecursionHelper", - "_dd.debug.error.20.snapshot_id": "", + "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.21.snapshot_id": "", "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.7.snapshot_id": "", + "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.8.snapshot_id": "", + "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.9.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "1010767962", + "_dd.di._eh": "-1900001729", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", @@ -190,15 +244,33 @@ "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.2.snapshot_id": "", - "_dd.debug.error.20.frame_data.class_name": "ExceptionReplayController", - "_dd.debug.error.20.frame_data.function": "exceptionReplayRecursionHelper", - "_dd.debug.error.20.snapshot_id": "", + "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.21.snapshot_id": "", "_dd.debug.error.3.frame_data.class_name": 
"ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.7.snapshot_id": "", + "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.8.snapshot_id": "", + "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.9.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "1010767962", + "_dd.di._eh": "-1900001729", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", @@ -249,15 +321,495 @@ "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.2.snapshot_id": "", - "_dd.debug.error.20.frame_data.class_name": "ExceptionReplayController", - "_dd.debug.error.20.frame_data.function": "exceptionReplayRecursionHelper", - "_dd.debug.error.20.snapshot_id": "", + "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.21.snapshot_id": "", "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.7.snapshot_id": "", + "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.8.snapshot_id": "", + "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.9.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "1010767962", + "_dd.di._eh": "-1900001729", + 
"_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion (app)", + "aspnet_core.route": "exceptionreplay/recursion", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 20", + "error.stack": "System.Exception: recursion exception depth 20\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origi", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion?depth=20", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_5": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.21.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.7.snapshot_id": "", + "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.8.snapshot_id": "", + "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.9.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-1900001729", + "_dd.di._er": "Eligible", + 
"_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion (app)", + "aspnet_core.route": "exceptionreplay/recursion", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 20", + "error.stack": "System.Exception: recursion exception depth 20\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origi", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion?depth=20", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_6": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.21.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.7.snapshot_id": "", + "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.8.snapshot_id": "", + "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.9.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-1900001729", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + 
"_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion (app)", + "aspnet_core.route": "exceptionreplay/recursion", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 20", + "error.stack": "System.Exception: recursion exception depth 20\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origi", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion?depth=20", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_7": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.21.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.7.snapshot_id": "", + "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.8.snapshot_id": "", + "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.9.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-1900001729", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + 
"aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion (app)", + "aspnet_core.route": "exceptionreplay/recursion", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 20", + "error.stack": "System.Exception: recursion exception depth 20\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origi", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion?depth=20", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_8": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.21.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.7.snapshot_id": "", + "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.8.snapshot_id": "", + "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.9.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-1900001729", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": 
"weblog.ExceptionReplayController.exceptionReplayRecursion (app)", + "aspnet_core.route": "exceptionreplay/recursion", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 20", + "error.stack": "System.Exception: recursion exception depth 20\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origi", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion?depth=20", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_9": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.21.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.7.snapshot_id": "", + "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.8.snapshot_id": "", + "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.9.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-1900001729", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": 
"weblog.ExceptionReplayController.exceptionReplayRecursion (app)", + "aspnet_core.route": "exceptionreplay/recursion", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 20", + "error.stack": "System.Exception: recursion exception depth 20\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origi", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion?depth=20", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_10": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.21.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.21.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.21.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.7.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.7.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.7.snapshot_id": "", + "_dd.debug.error.8.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.8.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.8.snapshot_id": "", + "_dd.debug.error.9.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.9.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.9.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-1900001729", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", diff --git 
a/tests/debugger/approvals/exception_replay_recursion_3_dotnet_snapshots_expected.json b/tests/debugger/approvals/exception_replay_recursion_3_dotnet_snapshots_expected.json index 78531a637b..dfdc0ddd18 100644 --- a/tests/debugger/approvals/exception_replay_recursion_3_dotnet_snapshots_expected.json +++ b/tests/debugger/approvals/exception_replay_recursion_3_dotnet_snapshots_expected.json @@ -29,7 +29,123 @@ "lineNumber": 0 }, { - "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 3" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "depth": { + "type": "Int32", + "value": "3" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursion", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, + { + "": "" + } + ], + "exceptionHash": "d6c565ed-75ef-37cf-bb35-be62f4867e4e", + "exceptionId": "", + "frameIndex": "4", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 3", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", "lineNumber": 0 }, { @@ -94,10 +210,31 @@ } }, "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, { "": "" } ], + "exceptionHash": "d6c565ed-75ef-37cf-bb35-be62f4867e4e", + "exceptionId": "", + 
"frameIndex": "3", "id": "", "timestamp": "", "duration": "", @@ -132,10 +269,6 @@ "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "lineNumber": 0 }, - { - "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", - "lineNumber": 0 - }, { "": "" } @@ -180,7 +313,7 @@ }, "currentDepth": { "type": "Int32", - "value": "3" + "value": "2" }, "this": { "type": "ExceptionReplayController", @@ -198,10 +331,36 @@ } }, "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, { "": "" } ], + "exceptionHash": "d6c565ed-75ef-37cf-bb35-be62f4867e4e", + "exceptionId": "", + "frameIndex": "2", "id": "", "timestamp": "", "duration": "", @@ -297,17 +456,22 @@ { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -321,6 +485,9 @@ "": "" } ], + "exceptionHash": "d6c565ed-75ef-37cf-bb35-be62f4867e4e", + "exceptionId": "", + "frameIndex": "1", "id": "", "timestamp": "", "duration": "", @@ -412,22 +579,27 @@ { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -441,6 +613,9 @@ "": "" } ], + "exceptionHash": "d6c565ed-75ef-37cf-bb35-be62f4867e4e", + "exceptionId": "", + "frameIndex": "0", "id": "", "timestamp": "", "duration": "", diff --git a/tests/debugger/approvals/exception_replay_recursion_3_dotnet_spans_expected.json 
b/tests/debugger/approvals/exception_replay_recursion_3_dotnet_spans_expected.json index 8a8e0a2d84..5c86e02aa9 100644 --- a/tests/debugger/approvals/exception_replay_recursion_3_dotnet_spans_expected.json +++ b/tests/debugger/approvals/exception_replay_recursion_3_dotnet_spans_expected.json @@ -16,9 +16,12 @@ "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.4.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "-49259628", + "_dd.di._eh": "353773369", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", @@ -72,9 +75,12 @@ "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.4.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "-49259628", + "_dd.di._eh": "353773369", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", @@ -128,9 +134,12 @@ "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.4.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "-49259628", + "_dd.di._eh": "353773369", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", @@ -184,9 +193,71 @@ "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.4.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "-49259628", + "_dd.di._eh": "353773369", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion (app)", + "aspnet_core.route": "exceptionreplay/recursion", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 3", + "error.stack": "System.Exception: recursion exception depth 3\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origin", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion?depth=3", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + 
"network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_4": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "353773369", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", diff --git a/tests/debugger/approvals/exception_replay_recursion_4_dotnet_snapshots_expected.json b/tests/debugger/approvals/exception_replay_recursion_4_dotnet_snapshots_expected.json new file mode 100644 index 0000000000..177e5f517a --- /dev/null +++ b/tests/debugger/approvals/exception_replay_recursion_4_dotnet_snapshots_expected.json @@ -0,0 +1,599 @@ +[ + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 4", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 4" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "originalDepth": { + "type": "Int32", + "value": "4" + }, + "currentDepth": { + "type": "Int32", + "value": "4" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + 
"id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursionHelper", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "": "" + } + ], + "exceptionHash": "a4bd1fab-cb47-3e0b-8fad-6bf845b770bf", + "exceptionId": "", + "frameIndex": "2", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 4", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 4" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "originalDepth": { + "type": "Int32", + "value": "4" + }, + "currentDepth": { + "type": "Int32", + "value": "4" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursionHelper", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "": "" + } + ], + "exceptionHash": "a4bd1fab-cb47-3e0b-8fad-6bf845b770bf", + "exceptionId": "", + "frameIndex": "3", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 4", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 4" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": 
{ + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "originalDepth": { + "type": "Int32", + "value": "4" + }, + "currentDepth": { + "type": "Int32", + "value": "4" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursionHelper", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, + { + "": "" + } + ], + "exceptionHash": "a4bd1fab-cb47-3e0b-8fad-6bf845b770bf", + "exceptionId": "", + "frameIndex": "4", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 4", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 4" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "originalDepth": { + "type": "Int32", + "value": "4" + }, + "currentDepth": { + "type": "Int32", + "value": "1" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursionHelper", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, + { + "": "" + } + ], + 
"exceptionHash": "a4bd1fab-cb47-3e0b-8fad-6bf845b770bf", + "exceptionId": "", + "frameIndex": "1", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 4", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 4" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "originalDepth": { + "type": "Int32", + "value": "4" + }, + "currentDepth": { + "type": "Int32", + "value": "0" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursionHelper", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, + { + "": "" + } + ], + "exceptionHash": "a4bd1fab-cb47-3e0b-8fad-6bf845b770bf", + "exceptionId": "", + "frameIndex": "0", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + } +] \ No newline at end of file diff --git a/tests/debugger/approvals/exception_replay_recursion_4_dotnet_spans_expected.json b/tests/debugger/approvals/exception_replay_recursion_4_dotnet_spans_expected.json new file mode 100644 index 0000000000..e03b5fd79c --- /dev/null +++ b/tests/debugger/approvals/exception_replay_recursion_4_dotnet_spans_expected.json @@ -0,0 +1,297 @@ +{ + "snapshot_0": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + 
"_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-691358470", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion_inline (app)", + "aspnet_core.route": "exceptionreplay/recursion_inline", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 4", + "error.stack": "System.Exception: recursion exception depth 4\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origin", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion_inline", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion_inline?depth=4", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion_inline", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_1": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-691358470", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion_inline (app)", + "aspnet_core.route": "exceptionreplay/recursion_inline", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": 
"recursion exception depth 4", + "error.stack": "System.Exception: recursion exception depth 4\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origin", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion_inline", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion_inline?depth=4", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion_inline", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_2": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-691358470", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion_inline (app)", + "aspnet_core.route": "exceptionreplay/recursion_inline", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 4", + "error.stack": "System.Exception: recursion exception depth 4\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origin", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion_inline", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion_inline?depth=4", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion_inline", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_3": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + 
"_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-691358470", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion_inline (app)", + "aspnet_core.route": "exceptionreplay/recursion_inline", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 4", + "error.stack": "System.Exception: recursion exception depth 4\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origin", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion_inline", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion_inline?depth=4", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion_inline", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_4": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-691358470", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion_inline (app)", + "aspnet_core.route": 
"exceptionreplay/recursion_inline", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 4", + "error.stack": "System.Exception: recursion exception depth 4\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origin", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion_inline", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion_inline?depth=4", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion_inline", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + } +} \ No newline at end of file diff --git a/tests/debugger/approvals/exception_replay_recursion_5_dotnet_snapshots_expected.json b/tests/debugger/approvals/exception_replay_recursion_5_dotnet_snapshots_expected.json index ba30ffe502..b6a7425fe3 100644 --- a/tests/debugger/approvals/exception_replay_recursion_5_dotnet_snapshots_expected.json +++ b/tests/debugger/approvals/exception_replay_recursion_5_dotnet_snapshots_expected.json @@ -37,7 +37,131 @@ "lineNumber": 0 }, { - "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "lineNumber": 0 + }, + { + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 5" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "depth": { + "type": "Int32", + "value": "5" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursion", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, + { + "": "" + } + ], + "exceptionHash": "a72525ef-dc75-3a29-abc5-e7f2574a18ab", + "exceptionId": "", + "frameIndex": "6", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 5", + "type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": 
"weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", "lineNumber": 0 }, { @@ -102,10 +226,31 @@ } }, "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, { "": "" } ], + "exceptionHash": "a72525ef-dc75-3a29-abc5-e7f2574a18ab", + "exceptionId": "", + "frameIndex": "5", "id": "", "timestamp": "", "duration": "", @@ -149,7 +294,133 @@ "lineNumber": 0 }, { - "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "": "" + } + ] + }, + "locals": { + "@exception": { + "type": "Exception", + "value": "Exception", + "fields": { + "Message": { + "type": "String", + "value": "recursion exception depth 5" + }, + "InnerException": { + "type": "Exception", + "isNull": "true" + }, + "HelpLink": { + "type": "String", + "isNull": "true" + }, + "Source": { + "type": "String", + "value": "app" + }, + "HResult": { + "type": "Int32", + "value": "-2146233088" + }, + "StackTrace": { + "type": "String", + "value": "" + } + } + } + }, + "arguments": { + "originalDepth": { + "type": "Int32", + "value": "5" + }, + "currentDepth": { + "type": "Int32", + "value": "4" + }, + "this": { + "type": "ExceptionReplayController", + "value": "ExceptionReplayController" + } + } + } + }, + "probe": { + "id": "", + "version": 1, + "location": { + "method": "exceptionReplayRecursionHelper", + "type": "weblog.ExceptionReplayController" + } + }, + "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, + { + "": "" + } + ], + "exceptionHash": "a72525ef-dc75-3a29-abc5-e7f2574a18ab", + "exceptionId": "", + "frameIndex": "4", + "id": "", + "timestamp": "", + "duration": "", + "language": "dotnet" + }, + { + "captures": { + "return": { + "staticFields": { + "Empty": { + "type": "EmptyResult", + "value": "EmptyResult" + } + }, + "throwable": { + "message": "recursion exception depth 5", + 
"type": "System.Exception", + "stacktrace": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "lineNumber": 0 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "lineNumber": 0 }, { @@ -196,7 +467,7 @@ }, "currentDepth": { "type": "Int32", - "value": "5" + "value": "3" }, "this": { "type": "ExceptionReplayController", @@ -214,10 +485,41 @@ } }, "stack": [ + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", + "lineNumber": 0 + }, + { + "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware.Invoke", + "lineNumber": 0 + }, { "": "" } ], + "exceptionHash": "a72525ef-dc75-3a29-abc5-e7f2574a18ab", + "exceptionId": "", + "frameIndex": "3", "id": "", "timestamp": "", "duration": "", @@ -317,22 +619,27 @@ { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -346,6 +653,9 @@ "": "" } ], + "exceptionHash": "a72525ef-dc75-3a29-abc5-e7f2574a18ab", + "exceptionId": "", + "frameIndex": "2", "id": "", "timestamp": "", "duration": "", @@ -441,27 +751,32 @@ { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", 
"fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -475,6 +790,9 @@ "": "" } ], + "exceptionHash": "a72525ef-dc75-3a29-abc5-e7f2574a18ab", + "exceptionId": "", + "frameIndex": "1", "id": "", "timestamp": "", "duration": "", @@ -566,32 +884,37 @@ { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 }, { "function": "weblog.ExceptionReplayController.exceptionReplayRecursionHelper", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 38 + "lineNumber": 46 + }, + { + "function": "weblog.ExceptionReplayController.exceptionReplayRecursion", + "fileName": "/app/ExceptionReplayController.cs", + "lineNumber": 28 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -605,6 +928,9 @@ "": "" } ], + "exceptionHash": "a72525ef-dc75-3a29-abc5-e7f2574a18ab", + "exceptionId": "", + "frameIndex": "0", "id": "", "timestamp": "", "duration": "", diff --git a/tests/debugger/approvals/exception_replay_recursion_5_dotnet_spans_expected.json b/tests/debugger/approvals/exception_replay_recursion_5_dotnet_spans_expected.json index 9445f576b9..55f1c1187a 100644 --- a/tests/debugger/approvals/exception_replay_recursion_5_dotnet_spans_expected.json +++ b/tests/debugger/approvals/exception_replay_recursion_5_dotnet_spans_expected.json @@ -16,12 +16,18 @@ "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.6.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "-79069184", + "_dd.di._eh": "-1928747995", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", @@ -75,12 +81,18 @@ 
"_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.6.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "-79069184", + "_dd.di._eh": "-1928747995", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", @@ -134,12 +146,18 @@ "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.6.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "-79069184", + "_dd.di._eh": "-1928747995", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", @@ -193,12 +211,18 @@ "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.6.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "-79069184", + "_dd.di._eh": "-1928747995", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", @@ -252,12 +276,148 @@ "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": 
"ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.6.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", - "_dd.di._eh": "-79069184", + "_dd.di._eh": "-1928747995", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion (app)", + "aspnet_core.route": "exceptionreplay/recursion", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 5", + "error.stack": "System.Exception: recursion exception depth 5\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origin", + "error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion?depth=5", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_5": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-1928747995", + "_dd.di._er": "Eligible", + "_dd.iast.enabled": "1", + "_dd.runtime_family": "dotnet", + "aspnet_core.endpoint": "weblog.ExceptionReplayController.exceptionReplayRecursion (app)", + "aspnet_core.route": "exceptionreplay/recursion", + "component": "aspnet_core", + "env": "system-tests", + "error.debug_info_captured": "true", + "error.msg": "recursion exception depth 5", + "error.stack": "System.Exception: recursion exception depth 5\n at weblog.ExceptionReplayController.exceptionReplayRecursionHelper(Int32 origin", + 
"error.type": "System.Exception", + "http.client_ip": "", + "http.method": "GET", + "http.request.headers.host": "localhost:7777", + "http.request.headers.user-agent": "", + "http.route": "exceptionreplay/recursion", + "http.status_code": "500", + "http.url": "http://localhost:7777/exceptionreplay/recursion?depth=5", + "http.useragent": "", + "key1": "val1", + "key2": "val2", + "language": "dotnet", + "network.client.ip": "", + "runtime-id": "", + "span.kind": "server", + "version": "" + }, + "metrics": "", + "name": "aspnet_core.request", + "resource": "GET /exceptionreplay/recursion", + "service": "weblog", + "spanID": "", + "start": "", + "traceID": "", + "type": "web" + }, + "snapshot_6": { + "duration": "", + "error": 1, + "meta": { + "_dd.appsec.event_rules.version": "", + "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.0.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.0.snapshot_id": "", + "_dd.debug.error.1.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.1.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.1.snapshot_id": "", + "_dd.debug.error.2.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.2.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.2.snapshot_id": "", + "_dd.debug.error.3.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.3.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.3.snapshot_id": "", + "_dd.debug.error.4.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.4.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.4.snapshot_id": "", + "_dd.debug.error.5.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.5.frame_data.function": "exceptionReplayRecursionHelper", + "_dd.debug.error.5.snapshot_id": "", + "_dd.debug.error.6.frame_data.class_name": "ExceptionReplayController", + "_dd.debug.error.6.frame_data.function": "exceptionReplayRecursion", + "_dd.debug.error.6.snapshot_id": "", + "_dd.debug.error.exception_hash": "", + "_dd.debug.error.exception_id": "", + "_dd.di._eh": "-1928747995", "_dd.di._er": "Eligible", "_dd.iast.enabled": "1", "_dd.runtime_family": "dotnet", diff --git a/tests/debugger/approvals/exception_replay_rockpaperscissors_dotnet_snapshots_expected.json b/tests/debugger/approvals/exception_replay_rockpaperscissors_dotnet_snapshots_expected.json index 96fd4c2f5e..007e737af0 100644 --- a/tests/debugger/approvals/exception_replay_rockpaperscissors_dotnet_snapshots_expected.json +++ b/tests/debugger/approvals/exception_replay_rockpaperscissors_dotnet_snapshots_expected.json @@ -77,7 +77,7 @@ { "function": "weblog.ExceptionReplayController.ExceptionReplayRockPaperScissors", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 75 + "lineNumber": 83 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -91,6 +91,9 @@ "": "" } ], + "exceptionHash": "3f123b08-8c82-337a-9a28-125bd42e0eaf", + "exceptionId": "", + "frameIndex": "0", "id": "", "timestamp": "", "duration": "", @@ -174,7 +177,7 @@ { "function": "weblog.ExceptionReplayController.ExceptionReplayRockPaperScissors", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 75 + "lineNumber": 83 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -188,6 +191,9 @@ "": "" } ], + "exceptionHash": 
"e6882fa3-e986-3f7f-830f-322e1881ade5", + "exceptionId": "", + "frameIndex": "0", "id": "", "timestamp": "", "duration": "", @@ -271,7 +277,7 @@ { "function": "weblog.ExceptionReplayController.ExceptionReplayRockPaperScissors", "fileName": "/app/ExceptionReplayController.cs", - "lineNumber": 75 + "lineNumber": 83 }, { "function": "Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNetCore.BlockingMiddleware+", @@ -285,6 +291,9 @@ "": "" } ], + "exceptionHash": "71174315-56b8-3c4b-83db-90764540ceae", + "exceptionId": "", + "frameIndex": "0", "id": "", "timestamp": "", "duration": "", diff --git a/tests/debugger/approvals/exception_replay_rockpaperscissors_dotnet_spans_expected.json b/tests/debugger/approvals/exception_replay_rockpaperscissors_dotnet_spans_expected.json index 373756405f..9261dcf592 100644 --- a/tests/debugger/approvals/exception_replay_rockpaperscissors_dotnet_spans_expected.json +++ b/tests/debugger/approvals/exception_replay_rockpaperscissors_dotnet_spans_expected.json @@ -6,6 +6,8 @@ "_dd.appsec.event_rules.version": "", "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.0.frame_data.function": "ExceptionReplayRockPaperScissors", + "_dd.debug.error.0.frame_data.name": "ExceptionReplayRockPaperScissors", + "_dd.debug.error.0.no_capture_reason": "The method ExceptionReplayRockPaperScissors could not be found.", "_dd.debug.error.0.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", @@ -53,6 +55,8 @@ "_dd.appsec.event_rules.version": "", "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.0.frame_data.function": "ExceptionReplayRockPaperScissors", + "_dd.debug.error.0.frame_data.name": "ExceptionReplayRockPaperScissors", + "_dd.debug.error.0.no_capture_reason": "The method ExceptionReplayRockPaperScissors could not be found.", "_dd.debug.error.0.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", @@ -100,6 +104,8 @@ "_dd.appsec.event_rules.version": "", "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", "_dd.debug.error.0.frame_data.function": "ExceptionReplayRockPaperScissors", + "_dd.debug.error.0.frame_data.name": "ExceptionReplayRockPaperScissors", + "_dd.debug.error.0.no_capture_reason": "The method ExceptionReplayRockPaperScissors could not be found.", "_dd.debug.error.0.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", diff --git a/tests/debugger/approvals/exception_replay_simple_dotnet_snapshots_expected.json b/tests/debugger/approvals/exception_replay_simple_dotnet_snapshots_expected.json index 350719f76e..57a7298118 100644 --- a/tests/debugger/approvals/exception_replay_simple_dotnet_snapshots_expected.json +++ b/tests/debugger/approvals/exception_replay_simple_dotnet_snapshots_expected.json @@ -87,6 +87,9 @@ "": "" } ], + "exceptionHash": "57c48f18-b30f-32cd-a5dc-22a2b3c36416", + "exceptionId": "", + "frameIndex": "0", "id": "", "timestamp": "", "duration": "", diff --git a/tests/debugger/approvals/exception_replay_simple_dotnet_spans_expected.json b/tests/debugger/approvals/exception_replay_simple_dotnet_spans_expected.json index 04bb282eac..3d12bf212a 100644 --- a/tests/debugger/approvals/exception_replay_simple_dotnet_spans_expected.json +++ b/tests/debugger/approvals/exception_replay_simple_dotnet_spans_expected.json @@ -6,6 +6,8 @@ "_dd.appsec.event_rules.version": "", "_dd.debug.error.0.frame_data.class_name": "ExceptionReplayController", 
"_dd.debug.error.0.frame_data.function": "ExceptionReplaySimple", + "_dd.debug.error.0.frame_data.name": "ExceptionReplaySimple", + "_dd.debug.error.0.no_capture_reason": "The method ExceptionReplaySimple could not be found.", "_dd.debug.error.0.snapshot_id": "", "_dd.debug.error.exception_hash": "", "_dd.debug.error.exception_id": "", diff --git a/tests/debugger/test_debugger_exception_replay.py b/tests/debugger/test_debugger_exception_replay.py index 7a1d2b44e4..67102ded9b 100644 --- a/tests/debugger/test_debugger_exception_replay.py +++ b/tests/debugger/test_debugger_exception_replay.py @@ -5,7 +5,7 @@ import tests.debugger.utils as debugger import os import re -from utils import scenarios, features, bug, context, flaky +from utils import scenarios, features, bug, context, flaky, irrelevant from utils.tools import logger @@ -310,11 +310,8 @@ def _validate_recursion_snapshots(self, snapshots, limit): helper_method = "exceptionReplayRecursionHelper" def get_frames(snapshot): - if self.get_tracer()["language"] == "dotnet": - return snapshot.get("captures", {}).get("return", {}).get("throwable", {}).get("stacktrace", []) - - if self.get_tracer()["language"] == "java": - method = snapshot.get("probe", {}).get("location", {}).get("method", "") + if self.get_tracer()["language"] in ["java", "dotnet"]: + method = snapshot.get("probe", {}).get("location", {}).get("method", None) if method: return [{"function": method}] @@ -325,14 +322,14 @@ def get_frames(snapshot): def check_frames(frames): nonlocal found_top, found_lowest + for frame in frames: - if isinstance(frame, dict) and "function" in frame: - if entry_method in frame["function"]: - found_top = True - if helper_method in frame["function"]: - found_lowest = True - if found_top and found_lowest: - break + if entry_method == frame["function"]: + found_top = True + if helper_method == frame["function"]: + found_lowest = True + if found_top and found_lowest: + break for snapshot in snapshots: check_frames(get_frames(snapshot)) @@ -348,7 +345,7 @@ def check_frames(frames): def setup_exception_replay_simple(self): self._setup("/exceptionreplay/simple", "simple exception") - @bug(context.library == "dotnet", reason="DEBUG-2799") + @bug(context.library < "dotnet@3.10.0", reason="DEBUG-2799") @bug(context.library == "python", reason="DEBUG-3257") @bug(context.library < "java@1.46.0", reason="DEBUG-3285") def test_exception_replay_simple(self): @@ -358,7 +355,7 @@ def test_exception_replay_simple(self): def setup_exception_replay_recursion_3(self): self._setup("/exceptionreplay/recursion?depth=3", "recursion exception depth 3") - @bug(context.library == "dotnet", reason="DEBUG-2799, DEBUG-3283") + @bug(context.library < "dotnet@3.10.0", reason="DEBUG-2799") @bug(context.library == "python", reason="DEBUG-3257, DEBUG-3282") @bug(context.library < "java@1.46.0", reason="DEBUG-3285") def test_exception_replay_recursion_3(self): @@ -368,7 +365,8 @@ def test_exception_replay_recursion_3(self): def setup_exception_replay_recursion_5(self): self._setup("/exceptionreplay/recursion?depth=5", "recursion exception depth 5") - @bug(context.library == "dotnet", reason="DEBUG-2799, DEBUG-3283") + @bug(context.library < "dotnet@3.10.0", reason="DEBUG-2799") + @bug(context.library == "dotnet", reason="DEBUG-3283") @bug(context.library == "python", reason="DEBUG-3257, DEBUG-3282") @bug(context.library < "java@1.46.0", reason="DEBUG-3285") def test_exception_replay_recursion_5(self): @@ -378,7 +376,8 @@ def test_exception_replay_recursion_5(self): def 
setup_exception_replay_recursion_20(self): self._setup("/exceptionreplay/recursion?depth=20", "recursion exception depth 20") - @bug(context.library == "dotnet", reason="DEBUG-2799, DEBUG-3283") + @bug(context.library < "dotnet@3.10.0", reason="DEBUG-2799") + @bug(context.library == "dotnet", reason="DEBUG-3283") @bug(context.library == "python", reason="DEBUG-3257, DEBUG-3282") @bug(context.library < "java@1.46.0", reason="DEBUG-3285") @bug(context.library == "java", reason="DEBUG-3390") @@ -386,11 +385,20 @@ def test_exception_replay_recursion_20(self): self._assert("exception_replay_recursion_20", ["recursion exception depth 20"]) self._validate_recursion_snapshots(self.snapshots, 10) + def setup_exception_replay_recursion_inlined(self): + self._setup("/exceptionreplay/recursion_inline?depth=4", "recursion exception depth 4") + + @irrelevant(context.library != "dotnet", reason="Test for specific bug in dotnet") + @bug(context.library == "dotnet", reason="DEBUG-3447") + def test_exception_replay_recursion_inlined(self): + self._assert("exception_replay_recursion_4", ["recursion exception depth 4"]) + self._validate_recursion_snapshots(self.snapshots, 4) + ############ Inner ############ def setup_exception_replay_inner(self): self._setup("/exceptionreplay/inner", "outer exception") - @bug(context.library == "dotnet", reason="DEBUG-2799") + @bug(context.library < "dotnet@3.10.0", reason="DEBUG-2799") @bug(context.library == "python", reason="DEBUG-3256, DEBUG-3257") @bug(context.library < "java@1.46.0", reason="DEBUG-3285") def test_exception_replay_inner(self): @@ -406,8 +414,7 @@ def setup_exception_replay_rockpaperscissors(self): shapes = {"rock": False, "paper": False, "scissors": False} while not all(shapes.values()) and retries < _max_retries: - for shape in shapes.keys(): - shape_found = shapes[shape] + for shape, shape_found in shapes.items(): logger.debug(f"{shape} found: {shape_found}, retry #{retries}") if shape_found: @@ -421,7 +428,7 @@ def setup_exception_replay_rockpaperscissors(self): retries += 1 - @bug(context.library == "dotnet", reason="DEBUG-2799") + @bug(context.library < "dotnet@3.10.0", reason="DEBUG-2799") @bug(context.library == "python", reason="DEBUG-3257") @bug(context.library < "java@1.46.0", reason="DEBUG-3285") def test_exception_replay_rockpaperscissors(self): @@ -431,7 +438,7 @@ def test_exception_replay_rockpaperscissors(self): def setup_exception_replay_multiframe(self): self._setup("/exceptionreplay/multiframe", "multiple stack frames exception") - @bug(context.library == "dotnet", reason="DEBUG-2799") + @bug(context.library < "dotnet@3.10.0", reason="DEBUG-2799") @bug(context.library == "python", reason="DEBUG-3257") @bug(context.library < "java@1.46.0", reason="DEBUG-3285") def test_exception_replay_multiframe(self): @@ -441,7 +448,6 @@ def test_exception_replay_multiframe(self): def setup_exception_replay_async(self): self._setup("/exceptionreplay/async", "async exception") - @bug(context.library == "dotnet", reason="DEBUG-2799") @flaky(context.library == "dotnet", reason="DEBUG-3281") @bug(context.library == "python", reason="DEBUG-3257") @bug(context.library < "java@1.46.0", reason="DEBUG-3285") diff --git a/tests/debugger/test_debugger_expression_language.py b/tests/debugger/test_debugger_expression_language.py index 2a98ce39bd..abb81e6bf6 100644 --- a/tests/debugger/test_debugger_expression_language.py +++ b/tests/debugger/test_debugger_expression_language.py @@ -3,7 +3,8 @@ # Copyright 2021 Datadog, Inc. 
import tests.debugger.utils as debugger -import re, json +import re +import json from utils import scenarios, features, bug, missing_feature, context @@ -650,9 +651,7 @@ def _get_hash_value_property_name(self): return "value" def _method_and_language_to_line_number(self, method, language): - """ - _method_and_language_to_line_number returns the respective line number given the method and language - """ + """_method_and_language_to_line_number returns the respective line number given the method and language""" return { "Expression": {"java": [71], "dotnet": [74], "python": [72]}, # The `@exception` variable is not available in the context of line probes. @@ -663,7 +662,7 @@ def _method_and_language_to_line_number(self, method, language): "Nulls": {"java": [130], "dotnet": [127], "python": [136]}, }.get(method, {}).get(language, []) - def _create_expression_probes(self, methodName, expressions, lines=[]): + def _create_expression_probes(self, methodName, expressions, lines=()): probes = [] expected_message_map = {} prob_types = ["method"] diff --git a/tests/debugger/test_debugger_pii.py b/tests/debugger/test_debugger_pii.py index 794db7ca54..08bee5fe6d 100644 --- a/tests/debugger/test_debugger_pii.py +++ b/tests/debugger/test_debugger_pii.py @@ -224,8 +224,6 @@ def test_pii_redaction_line_full(self): self._assert(REDACTED_KEYS, REDACTED_TYPES, line_probe=True) ############ old versions ############ - def filter(keys_to_filter): - return [item for item in REDACTED_KEYS if item not in keys_to_filter] def setup_pii_redaction_java_1_33(self): self._setup() diff --git a/tests/debugger/test_debugger_probe_status.py b/tests/debugger/test_debugger_probe_status.py index 360bc1c665..51b06dfab1 100644 --- a/tests/debugger/test_debugger_probe_status.py +++ b/tests/debugger/test_debugger_probe_status.py @@ -58,7 +58,7 @@ def _check_probe_status(expected_id, expected_status): if error_message is not None: errors.append(error_message) - assert not errors, f"Probe status errors:\n" + "\n".join(errors) + assert not errors, "Probe status errors:\n" + "\n".join(errors) ############ log line probe ############ def setup_probe_status_log_line(self): diff --git a/tests/debugger/test_debugger_symdb.py b/tests/debugger/test_debugger_symdb.py index c682baa8e8..8bfd48fe63 100644 --- a/tests/debugger/test_debugger_symdb.py +++ b/tests/debugger/test_debugger_symdb.py @@ -42,10 +42,7 @@ def check_scope(scope): name = scope.get("name", "") if re.search(pattern, name): scope_type = scope.get("scope_type", "") - if scope_type in ["CLASS", "class", "MODULE"]: - return True - - return False + return scope_type in ["CLASS", "class", "MODULE"] for nested_scope in scope.get("scopes", []): if check_scope(nested_scope): diff --git a/tests/debugger/utils.py b/tests/debugger/utils.py index 3bc805da90..0bd68c6d79 100644 --- a/tests/debugger/utils.py +++ b/tests/debugger/utils.py @@ -7,8 +7,6 @@ import os import os.path import uuid -import gzip -import io from utils import interfaces, remote_config, weblog, context from utils.tools import logger @@ -43,10 +41,9 @@ def extract_probe_ids(probes): return [] -def _get_path(test_name, suffix): +def _get_path(test_name, suffix) -> str: filename = test_name + "_" + _Base_Debugger_Test.tracer["language"] + "_" + suffix + ".json" - path = os.path.join(_CUR_DIR, "approvals", filename) - return path + return os.path.join(_CUR_DIR, "approvals", filename) def write_approval(data, test_name, suffix): @@ -280,11 +277,7 @@ def _read_data(): path = _DEBUGGER_PATH else: path = _LOGS_PATH - elif 
context.library == "python": - path = _DEBUGGER_PATH - elif context.library == "ruby": - path = _DEBUGGER_PATH - elif context.library == "nodejs": + elif context.library == "python" or context.library == "ruby" or context.library == "nodejs": path = _DEBUGGER_PATH else: path = _LOGS_PATH @@ -307,11 +300,11 @@ def _process_debugger(debugger): # update status if probe_id in probe_diagnostics: current_status = probe_diagnostics[probe_id]["status"] - if current_status == "RECEIVED": - probe_diagnostics[probe_id]["status"] = status - elif current_status == "INSTALLED" and status in ["INSTALLED", "EMITTING"]: - probe_diagnostics[probe_id]["status"] = status - elif current_status == "EMITTING" and status == "EMITTING": + if ( + current_status == "RECEIVED" + or (current_status == "INSTALLED" and status in ["INSTALLED", "EMITTING"]) + or (current_status == "EMITTING" and status == "EMITTING") + ): probe_diagnostics[probe_id]["status"] = status # set new status else: @@ -326,10 +319,9 @@ def _process_debugger(debugger): for d_content in d_contents: if isinstance(d_content, dict): _process_debugger(d_content["debugger"]) - else: - if "debugger" in content: - if isinstance(content, dict): - _process_debugger(content["debugger"]) + elif "debugger" in content: + if isinstance(content, dict): + _process_debugger(content["debugger"]) return probe_diagnostics diff --git a/tests/docker_ssi/test_docker_ssi.py b/tests/docker_ssi/test_docker_ssi.py index 4d301920b2..0b0213368c 100644 --- a/tests/docker_ssi/test_docker_ssi.py +++ b/tests/docker_ssi/test_docker_ssi.py @@ -9,7 +9,8 @@ class TestDockerSSIFeatures: """Test the ssi in a simulated host injection environment (docker container + test agent) We test that the injection is performed and traces and telemetry are generated. - If the language version is not supported, we only check that we don't break the app and telemetry is generated.""" + If the language version is not supported, we only check that we don't break the app and telemetry is generated. + """ _r = None @@ -111,10 +112,10 @@ def test_telemetry_abort(self): inject_result = False break - assert inject_result != None, "No telemetry data found for inject.success, inject.skip or inject.error" + assert inject_result is not None, "No telemetry data found for inject.success, inject.skip or inject.error" # The injector detected by itself that the version is not supported - if inject_result == False: + if inject_result is False: return # There is telemetry data about the library entrypoint. We only validate there is data diff --git a/tests/docker_ssi/test_docker_ssi_crash.py b/tests/docker_ssi/test_docker_ssi_crash.py index 8a73ea6cb6..2d2af661b9 100644 --- a/tests/docker_ssi/test_docker_ssi_crash.py +++ b/tests/docker_ssi/test_docker_ssi_crash.py @@ -15,7 +15,8 @@ @scenarios.docker_ssi class TestDockerSSICrash: """Test the ssi in a simulated host injection environment (docker container + test agent) - We test scenarios when the application crashes and sends a crash report.""" + We test scenarios when the application crashes and sends a crash report.
+ """ _r = None diff --git a/tests/fuzzer/core.py b/tests/fuzzer/core.py index 93089e6332..60ef50f595 100644 --- a/tests/fuzzer/core.py +++ b/tests/fuzzer/core.py @@ -10,12 +10,10 @@ from logging.handlers import RotatingFileHandler import os import signal -import time import aiohttp from yarl import URL -from utils import context from tests.fuzzer.corpus import get_corpus from tests.fuzzer.request_mutator import get_mutator @@ -51,7 +49,7 @@ def __call__(self, payload): return if self.logger is None: - self.logger = logging.Logger(__name__) + self.logger = logging.getLogger(__name__) self.logger.addHandler(RotatingFileHandler(self.filename)) self.logger.info(json.dumps(payload)) @@ -96,7 +94,7 @@ def __init__( self.dump_on_status = dump_on_status self.enable_response_dump = False - self.systematic_exporter = _RequestDumper() if systematic_export else lambda x: 0 + self.systematic_exporter = _RequestDumper() if systematic_export else lambda _: 0 self.total_metric = AccumulatedMetric("#", format_string="#{value}", display_length=7, has_raw_value=False) self.memory_metric = NumericalMetric("Mem") @@ -155,7 +153,7 @@ async def wait_for_first_response(self): self.logger.info(f"First response received after {i} attempts") return - time.sleep(1) + await asyncio.sleep(1) raise Exception("Server does not respond") finally: @@ -165,10 +163,10 @@ def run_forever(self): self.logger.info("") self.logger.info("=" * 80) - asyncio.ensure_future(self._run(), loop=self.loop) + task = asyncio.ensure_future(self._run(), loop=self.loop) self.loop.add_signal_handler(signal.SIGINT, self.perform_armageddon) self.logger.info("Starting event loop") - self.loop.run_forever() + self.loop.run_until_complete(task) def perform_armageddon(self): self.finished = True @@ -208,8 +206,8 @@ async def watch_docker_target(self): async def _run(self): try: await self.wait_for_first_response() - except Exception as e: - self.logger.error(str(e)) + except Exception: + self.logger.exception("First response failed") self.loop.stop() return @@ -259,7 +257,7 @@ async def _run(self): task = self.loop.create_task(self._process(session, request)) tasks.add(task) task.add_done_callback(tasks.remove) - task.add_done_callback(lambda t: self.sem.release()) + task.add_done_callback(lambda _: self.sem.release()) request_id += 1 diff --git a/tests/fuzzer/corpus.py b/tests/fuzzer/corpus.py index a8c1a7a9a0..0e9f6e2c33 100644 --- a/tests/fuzzer/corpus.py +++ b/tests/fuzzer/corpus.py @@ -5,6 +5,7 @@ import os import sys import json +from pathlib import Path from tests.fuzzer.tools.random_strings import get_random_unicode as gru @@ -23,8 +24,7 @@ def get_attack10_corpus(): def get_big_requests_corpus(): - """ - Send huge requests. + """Send huge requests. Should be run with -c 1 Need a better ouput to interrpret results... 
@@ -163,12 +163,12 @@ def _load_dir(base_dirname): _load_dir(os.path.join(base_dirname, dirname)) for filename in filenames: - if filename.endswith(".json") or filename.endswith(".dump"): + if filename.endswith((".json", ".dump")): _load_file(os.path.join(base_dirname, filename)) - if os.path.isfile(source): + if Path(source).is_file(): _load_file(source) - elif os.path.isdir(source): + elif Path(source).is_dir(): _load_dir(source) else: raise ValueError(f"{source} is not a file or a dir") diff --git a/tests/fuzzer/main.py b/tests/fuzzer/main.py index a3db0c5f0c..47dd969bab 100644 --- a/tests/fuzzer/main.py +++ b/tests/fuzzer/main.py @@ -68,6 +68,7 @@ def main(): weblog = WeblogContainer(host_log_folder="logs_fuzzer", use_proxy=False) weblog.configure(False) weblog.start(network) + weblog.post_start() Fuzzer( corpus=args.corpus, diff --git a/tests/fuzzer/request_mutator.py b/tests/fuzzer/request_mutator.py index e2f60ae7a7..79f4f3ff1c 100644 --- a/tests/fuzzer/request_mutator.py +++ b/tests/fuzzer/request_mutator.py @@ -90,7 +90,7 @@ def _mutate_item(item): item = random.choice((True, False)) else: - # TODO + # TODO: other use cases pass return item @@ -538,9 +538,7 @@ def get_payload_value(self, allow_nested=False): ################################ def clean_request(self, request): - """ - The purpose if this function is to clean requests from corpus that may cause a HTTP 500 response - """ + """The purpose of this function is to clean requests from corpus that may cause an HTTP 500 response""" # request["path"] = request["path"][:self.max_path_length] diff --git a/tests/fuzzer/tools/__init__.py b/tests/fuzzer/tools/__init__.py index c5d450fadb..06b5a67d01 100644 --- a/tests/fuzzer/tools/__init__.py +++ b/tests/fuzzer/tools/__init__.py @@ -8,9 +8,7 @@ def apply_method(obj, key_callback, value_callback): - """ - Recursyvly apply methods on a JSON-like object - """ + """Recursively apply methods on a JSON-like object""" if obj is None or isinstance(obj, (str, float, int, bool)): return value_callback(obj) diff --git a/tests/fuzzer/tools/_tools.py b/tests/fuzzer/tools/_tools.py index bcd094ebbc..e1faf77999 100644 --- a/tests/fuzzer/tools/_tools.py +++ b/tests/fuzzer/tools/_tools.py @@ -4,14 +4,10 @@ class cached_property: - """ - Descriptor (non-data) for building an attribute on-demand on first use. - """ + """Descriptor (non-data) for building an attribute on-demand on first use.""" def __init__(self, factory): - """ - is called such: factory(instance) to build the attribute.
- """ + """ is called such: factory(instance) to build the attribute.""" self._attr_name = factory.__name__ self._factory = factory diff --git a/tests/fuzzer/tools/metrics.py b/tests/fuzzer/tools/metrics.py index 8d95f0599e..5faf5d3096 100644 --- a/tests/fuzzer/tools/metrics.py +++ b/tests/fuzzer/tools/metrics.py @@ -45,38 +45,28 @@ def update(self, value=None): self.global_value = value def observe(self): - """ - Will be called before printing - """ + """Will be called before printing""" def observe_global_value(self): self.value = self.global_value self.observe() def reset(self): - """ - Will be called after printing - """ + """Will be called after printing""" @property def pretty(self): - """ - Will be printed - """ + """Will be printed""" return self.format_string.format(value=str(self.value)) @property def raw(self): - """ - Will be exported for later analysis - """ + """Will be exported for later analysis""" return self.value @property def is_null(self): - """ - if true, will not reported in log file - """ + """If true, will not reported in log file""" return False diff --git a/tests/integrations/crossed_integrations/test_kafka.py b/tests/integrations/crossed_integrations/test_kafka.py index b3f25d7ab8..d958502770 100644 --- a/tests/integrations/crossed_integrations/test_kafka.py +++ b/tests/integrations/crossed_integrations/test_kafka.py @@ -50,8 +50,7 @@ def get_topic(span) -> str | None: return topic def setup_produce(self): - """ - send request A to weblog : this request will produce a kafka message + """Send request A to weblog : this request will produce a kafka message send request B to library buddy, this request will consume kafka message """ self.production_response = weblog.get( @@ -89,8 +88,7 @@ def test_produce_trace_equality(self): assert producer_span["trace_id"] == consumer_span["trace_id"] def setup_consume(self): - """ - send request A to library buddy : this request will produce a kafka message + """Send request A to library buddy : this request will produce a kafka message send request B to weblog, this request will consume kafka message request A: GET /library_buddy/produce_kafka_message @@ -131,8 +129,7 @@ def test_consume_trace_equality(self): assert producer_span["trace_id"] == consumer_span["trace_id"] def validate_kafka_spans(self, producer_interface, consumer_interface, topic): - """ - Validates production/consumption of kafka message. + """Validates production/consumption of kafka message. 
It works the same for both test_produce and test_consume """ diff --git a/tests/integrations/crossed_integrations/test_kinesis.py b/tests/integrations/crossed_integrations/test_kinesis.py index c550ffa6fe..850e58d1f8 100644 --- a/tests/integrations/crossed_integrations/test_kinesis.py +++ b/tests/integrations/crossed_integrations/test_kinesis.py @@ -2,11 +2,9 @@ import json from utils.buddies import python_buddy -from utils import interfaces, scenarios, weblog, missing_feature, features, context, irrelevant +from utils import interfaces, scenarios, weblog, missing_feature, features, context from utils.tools import logger -from tests.integrations.utils import delete_kinesis_stream - class _Test_Kinesis: """Test Kinesis compatibility with inputted datadog tracer""" @@ -72,26 +70,22 @@ def get_stream(span) -> str | None: return stream def setup_produce(self): - """ - send request A to weblog : this request will produce a Kinesis message + """Send request A to weblog : this request will produce a Kinesis message send request B to library buddy, this request will consume Kinesis message """ - try: - message = ( - "[crossed_integrations/test_kinesis.py][Kinesis] Hello from Kinesis " - f"[{context.library.library} weblog->{self.buddy_interface.name}] test produce at {self.unique_id}" - ) - - self.production_response = weblog.get( - "/kinesis/produce", params={"stream": self.WEBLOG_TO_BUDDY_STREAM, "message": message}, timeout=120 - ) - self.consume_response = self.buddy.get( - "/kinesis/consume", - params={"stream": self.WEBLOG_TO_BUDDY_STREAM, "message": message, "timeout": 60}, - timeout=61, - ) - finally: - delete_kinesis_stream(self.WEBLOG_TO_BUDDY_STREAM) + message = ( + "[crossed_integrations/test_kinesis.py][Kinesis] Hello from Kinesis " + f"[{context.library.library} weblog->{self.buddy_interface.name}] test produce at {self.unique_id}" + ) + + self.production_response = weblog.get( + "/kinesis/produce", params={"stream": self.WEBLOG_TO_BUDDY_STREAM, "message": message}, timeout=120 + ) + self.consume_response = self.buddy.get( + "/kinesis/consume", + params={"stream": self.WEBLOG_TO_BUDDY_STREAM, "message": message, "timeout": 60}, + timeout=61, + ) def test_produce(self): """Check that a message produced to Kinesis is correctly ingested by a Datadog tracer""" @@ -132,29 +126,25 @@ def test_produce_trace_equality(self): assert producer_span["trace_id"] == consumer_span["trace_id"] def setup_consume(self): - """ - send request A to library buddy : this request will produce a Kinesis message + """Send request A to library buddy : this request will produce a Kinesis message send request B to weblog, this request will consume Kinesis message request A: GET /library_buddy/produce_kinesis_message request B: GET /weblog/consume_kinesis_message """ - try: - message = ( - "[crossed_integrations/test_kinesis.py][Kinesis] Hello from Kinesis " - f"[{self.buddy_interface.name}->{context.library.library} weblog] test consume at {self.unique_id}" - ) - - self.production_response = self.buddy.get( - "/kinesis/produce", params={"stream": self.BUDDY_TO_WEBLOG_STREAM, "message": message}, timeout=500 - ) - self.consume_response = weblog.get( - "/kinesis/consume", - params={"stream": self.BUDDY_TO_WEBLOG_STREAM, "message": message, "timeout": 60}, - timeout=61, - ) - finally: - delete_kinesis_stream(self.BUDDY_TO_WEBLOG_STREAM) + message = ( + "[crossed_integrations/test_kinesis.py][Kinesis] Hello from Kinesis " + f"[{self.buddy_interface.name}->{context.library.library} weblog] test consume at 
{self.unique_id}" + ) + + self.production_response = self.buddy.get( + "/kinesis/produce", params={"stream": self.BUDDY_TO_WEBLOG_STREAM, "message": message}, timeout=500 + ) + self.consume_response = weblog.get( + "/kinesis/consume", + params={"stream": self.BUDDY_TO_WEBLOG_STREAM, "message": message, "timeout": 60}, + timeout=61, + ) def test_consume(self): """Check that a message by an app instrumented by a Datadog tracer is correctly ingested""" @@ -195,8 +185,7 @@ def test_consume_trace_equality(self): assert producer_span["trace_id"] == consumer_span["trace_id"] def validate_kinesis_spans(self, producer_interface, consumer_interface, stream): - """ - Validates production/consumption of Kinesis message. + """Validates production/consumption of Kinesis message. It works the same for both test_produce and test_consume """ @@ -215,7 +204,6 @@ def validate_kinesis_spans(self, producer_interface, consumer_interface, stream) @scenarios.crossed_tracing_libraries -@irrelevant(True, reason="AWS Tests are not currently stable.") @features.aws_kinesis_span_creationcontext_propagation_via_message_attributes_with_dd_trace class Test_Kinesis_PROPAGATION_VIA_MESSAGE_ATTRIBUTES(_Test_Kinesis): buddy_interface = interfaces.python_buddy diff --git a/tests/integrations/crossed_integrations/test_rabbitmq.py b/tests/integrations/crossed_integrations/test_rabbitmq.py index 72bb0ccf18..837ea8d29e 100644 --- a/tests/integrations/crossed_integrations/test_rabbitmq.py +++ b/tests/integrations/crossed_integrations/test_rabbitmq.py @@ -44,9 +44,9 @@ def get_span(cls, interface, span_kind, queue, exchange, operation): if ( queue.lower() not in span.get("resource").lower() and exchange.lower() not in span.get("resource").lower() - and queue.lower() not in meta.get(f"rabbitmq.routing_key", "").lower() + and queue.lower() not in meta.get("rabbitmq.routing_key", "").lower() # this is where we find the queue name in dotnet 👇 - and queue.lower() not in meta.get(f"amqp.routing_key", "").lower() + and queue.lower() not in meta.get("amqp.routing_key", "").lower() ): continue @@ -57,8 +57,7 @@ def get_span(cls, interface, span_kind, queue, exchange, operation): return None def setup_produce(self): - """ - send request A to weblog : this request will produce a RabbitMQ message + """Send request A to weblog : this request will produce a RabbitMQ message send request B to library buddy, this request will consume RabbitMQ message """ @@ -121,8 +120,7 @@ def test_produce_trace_equality(self): assert producer_span["trace_id"] == consumer_span["trace_id"] def setup_consume(self): - """ - send request A to library buddy : this request will produce a RabbitMQ message + """Send request A to library buddy : this request will produce a RabbitMQ message send request B to weblog, this request will consume RabbitMQ message request A: GET /library_buddy/produce_rabbitmq_message @@ -188,8 +186,7 @@ def test_consume_trace_equality(self): assert producer_span["trace_id"] == consumer_span["trace_id"] def validate_rabbitmq_spans(self, producer_interface, consumer_interface, queue, exchange): - """ - Validates production/consumption of RabbitMQ message. + """Validates production/consumption of RabbitMQ message. 
It works the same for both test_produce and test_consume """ diff --git a/tests/integrations/crossed_integrations/test_sns_to_sqs.py b/tests/integrations/crossed_integrations/test_sns_to_sqs.py index e2c0adc25f..86fe6d5aa8 100644 --- a/tests/integrations/crossed_integrations/test_sns_to_sqs.py +++ b/tests/integrations/crossed_integrations/test_sns_to_sqs.py @@ -2,11 +2,9 @@ import json from utils.buddies import python_buddy -from utils import interfaces, scenarios, weblog, missing_feature, features, context, irrelevant +from utils import interfaces, scenarios, weblog, missing_feature, features, context from utils.tools import logger -from tests.integrations.utils import delete_sns_topic, delete_sqs_queue - class _Test_SNS: """Test sns compatibility with inputted datadog tracer""" @@ -58,10 +56,7 @@ def get_span(cls, interface, span_kind, queue, topic, operation): continue elif operation.lower() == "receivemessage" and span["meta"].get("language", "") == "javascript": # for nodejs we propagate from aws.response span which does not have the queue included on the span - if span["resource"] != "aws.response": - continue - # if we found the manual span, and now have the aws.response span, we will return this span - elif not manual_span_found: + if span["resource"] != "aws.response" or not manual_span_found: continue elif queue != cls.get_queue(span): continue @@ -102,29 +97,24 @@ def get_topic(span) -> str | None: return topic def setup_produce(self): - """ - send request A to weblog : this request will produce a sns message + """Send request A to weblog : this request will produce a sns message send request B to library buddy, this request will consume sns message """ - try: - message = ( - "[crossed_integrations/test_sns_to_sqs.py][SNS] Hello from SNS " - f"[{context.library.library} weblog->{self.buddy_interface.name}] test produce at {self.unique_id}" - ) - - self.production_response = weblog.get( - "/sns/produce", - params={"queue": self.WEBLOG_TO_BUDDY_QUEUE, "topic": self.WEBLOG_TO_BUDDY_TOPIC, "message": message}, - timeout=60, - ) - self.consume_response = self.buddy.get( - "/sns/consume", - params={"queue": self.WEBLOG_TO_BUDDY_QUEUE, "timeout": 60, "message": message}, - timeout=61, - ) - finally: - delete_sns_topic(self.WEBLOG_TO_BUDDY_TOPIC) - delete_sqs_queue(self.WEBLOG_TO_BUDDY_QUEUE) + message = ( + "[crossed_integrations/test_sns_to_sqs.py][SNS] Hello from SNS " + f"[{context.library.library} weblog->{self.buddy_interface.name}] test produce at {self.unique_id}" + ) + + self.production_response = weblog.get( + "/sns/produce", + params={"queue": self.WEBLOG_TO_BUDDY_QUEUE, "topic": self.WEBLOG_TO_BUDDY_TOPIC, "message": message}, + timeout=60, + ) + self.consume_response = self.buddy.get( + "/sns/consume", + params={"queue": self.WEBLOG_TO_BUDDY_QUEUE, "timeout": 60, "message": message}, + timeout=61, + ) def test_produce(self): """Check that a message produced to sns is correctly ingested by a Datadog tracer""" @@ -164,32 +154,27 @@ def test_produce_trace_equality(self): assert producer_span["trace_id"] == consumer_span["trace_id"] def setup_consume(self): - """ - send request A to library buddy : this request will produce a sns message + """Send request A to library buddy : this request will produce a sns message send request B to weblog, this request will consume sns message request A: GET /library_buddy/produce_sns_message request B: GET /weblog/consume_sns_message """ - try: - message = ( - "[crossed_integrations/test_sns_to_sqs.py][SNS] Hello from SNS " - 
f"[{self.buddy_interface.name}->{context.library.library} weblog] test consume at {self.unique_id}" - ) - - self.production_response = self.buddy.get( - "/sns/produce", - params={"queue": self.BUDDY_TO_WEBLOG_QUEUE, "topic": self.BUDDY_TO_WEBLOG_TOPIC, "message": message}, - timeout=60, - ) - self.consume_response = weblog.get( - "/sns/consume", - params={"queue": self.BUDDY_TO_WEBLOG_QUEUE, "timeout": 60, "message": message}, - timeout=61, - ) - finally: - delete_sns_topic(self.BUDDY_TO_WEBLOG_TOPIC) - delete_sqs_queue(self.BUDDY_TO_WEBLOG_QUEUE) + message = ( + "[crossed_integrations/test_sns_to_sqs.py][SNS] Hello from SNS " + f"[{self.buddy_interface.name}->{context.library.library} weblog] test consume at {self.unique_id}" + ) + + self.production_response = self.buddy.get( + "/sns/produce", + params={"queue": self.BUDDY_TO_WEBLOG_QUEUE, "topic": self.BUDDY_TO_WEBLOG_TOPIC, "message": message}, + timeout=60, + ) + self.consume_response = weblog.get( + "/sns/consume", + params={"queue": self.BUDDY_TO_WEBLOG_QUEUE, "timeout": 60, "message": message}, + timeout=61, + ) def test_consume(self): """Check that a message by an app instrumented by a Datadog tracer is correctly ingested""" @@ -230,8 +215,7 @@ def test_consume_trace_equality(self): assert producer_span["trace_id"] == consumer_span["trace_id"] def validate_sns_spans(self, producer_interface, consumer_interface, queue, topic): - """ - Validates production/consumption of sns message. + """Validates production/consumption of sns message. It works the same for both test_produce and test_consume """ @@ -258,7 +242,6 @@ def validate_sns_spans(self, producer_interface, consumer_interface, queue, topi @scenarios.crossed_tracing_libraries @features.aws_sns_span_creationcontext_propagation_via_message_attributes_with_dd_trace -@irrelevant(True, reason="AWS Tests are not currently stable.") class Test_SNS_Propagation(_Test_SNS): buddy_interface = interfaces.python_buddy buddy = python_buddy diff --git a/tests/integrations/crossed_integrations/test_sqs.py b/tests/integrations/crossed_integrations/test_sqs.py index feda54396d..724c4c0bd1 100644 --- a/tests/integrations/crossed_integrations/test_sqs.py +++ b/tests/integrations/crossed_integrations/test_sqs.py @@ -5,8 +5,6 @@ from utils import interfaces, scenarios, weblog, missing_feature, features, context, irrelevant from utils.tools import logger -from tests.integrations.utils import delete_sqs_queue - class _Test_SQS: """Test sqs compatibility with inputted datadog tracer""" @@ -56,10 +54,7 @@ def get_span(cls, interface, span_kind, queue, operation): elif operation.lower() == "receivemessage" and span["meta"].get("language", "") == "javascript": # for nodejs we propagate from aws.response span which does not have the queue included on the span - if span["resource"] != "aws.response": - continue - # if we found the manual span, and now have the aws.response span, we will return this span - elif not manual_span_found: + if span["resource"] != "aws.response" or not manual_span_found: continue elif queue != cls.get_queue(span): continue @@ -87,26 +82,22 @@ def get_queue(span) -> str | None: return queue def setup_produce(self): - """ - send request A to weblog : this request will produce a sqs message + """Send request A to weblog : this request will produce a sqs message send request B to library buddy, this request will consume sqs message """ - try: - message = ( - "[crossed_integrations/sqs.py][SQS] Hello from SQS " - f"[{context.library.library} weblog->{self.buddy_interface.name}] test 
produce: {self.unique_id}" - ) - - self.production_response = weblog.get( - "/sqs/produce", params={"queue": self.WEBLOG_TO_BUDDY_QUEUE, "message": message}, timeout=60 - ) - self.consume_response = self.buddy.get( - "/sqs/consume", - params={"queue": self.WEBLOG_TO_BUDDY_QUEUE, "timeout": 60, "message": message}, - timeout=61, - ) - finally: - delete_sqs_queue(self.WEBLOG_TO_BUDDY_QUEUE) + message = ( + "[crossed_integrations/sqs.py][SQS] Hello from SQS " + f"[{context.library.library} weblog->{self.buddy_interface.name}] test produce: {self.unique_id}" + ) + + self.production_response = weblog.get( + "/sqs/produce", params={"queue": self.WEBLOG_TO_BUDDY_QUEUE, "message": message}, timeout=60 + ) + self.consume_response = self.buddy.get( + "/sqs/consume", + params={"queue": self.WEBLOG_TO_BUDDY_QUEUE, "timeout": 60, "message": message}, + timeout=61, + ) def test_produce(self): """Check that a message produced to sqs is correctly ingested by a Datadog tracer""" @@ -151,29 +142,25 @@ def test_produce_trace_equality(self): assert producer_span["trace_id"] == consumer_span["trace_id"] def setup_consume(self): - """ - send request A to library buddy : this request will produce a sqs message + """Send request A to library buddy : this request will produce a sqs message send request B to weblog, this request will consume sqs message request A: GET /library_buddy/produce_sqs_message request B: GET /weblog/consume_sqs_message """ - try: - message = ( - "[crossed_integrations/test_sqs.py][SQS] Hello from SQS " - f"[{self.buddy_interface.name}->{context.library.library} weblog] test consume: {self.unique_id}" - ) - - self.production_response = self.buddy.get( - "/sqs/produce", params={"queue": self.BUDDY_TO_WEBLOG_QUEUE, "message": message}, timeout=60 - ) - self.consume_response = weblog.get( - "/sqs/consume", - params={"queue": self.BUDDY_TO_WEBLOG_QUEUE, "timeout": 60, "message": message}, - timeout=61, - ) - finally: - delete_sqs_queue(self.BUDDY_TO_WEBLOG_QUEUE) + message = ( + "[crossed_integrations/test_sqs.py][SQS] Hello from SQS " + f"[{self.buddy_interface.name}->{context.library.library} weblog] test consume: {self.unique_id}" + ) + + self.production_response = self.buddy.get( + "/sqs/produce", params={"queue": self.BUDDY_TO_WEBLOG_QUEUE, "message": message}, timeout=60 + ) + self.consume_response = weblog.get( + "/sqs/consume", + params={"queue": self.BUDDY_TO_WEBLOG_QUEUE, "timeout": 60, "message": message}, + timeout=61, + ) def test_consume(self): """Check that a message by an app instrumented by a Datadog tracer is correctly ingested""" @@ -212,8 +199,7 @@ def test_consume_trace_equality(self): assert producer_span["trace_id"] == consumer_span["trace_id"] def validate_sqs_spans(self, producer_interface, consumer_interface, queue): - """ - Validates production/consumption of sqs message. + """Validates production/consumption of sqs message. 
It works the same for both test_produce and test_consume """ @@ -235,7 +221,6 @@ def validate_sqs_spans(self, producer_interface, consumer_interface, queue): @scenarios.crossed_tracing_libraries -@irrelevant(True, reason="AWS Tests are not currently stable.") @features.aws_sqs_span_creationcontext_propagation_via_message_attributes_with_dd_trace class Test_SQS_PROPAGATION_VIA_MESSAGE_ATTRIBUTES(_Test_SQS): buddy_interface = interfaces.python_buddy @@ -248,8 +233,8 @@ class Test_SQS_PROPAGATION_VIA_MESSAGE_ATTRIBUTES(_Test_SQS): @scenarios.crossed_tracing_libraries -@irrelevant(True, reason="AWS Tests are not currently stable.") @features.aws_sqs_span_creationcontext_propagation_via_xray_header_with_dd_trace +@irrelevant(True, reason="Localstack SQS does not support AWS Xray Header parsing") class Test_SQS_PROPAGATION_VIA_AWS_XRAY_HEADERS(_Test_SQS): buddy_interface = interfaces.java_buddy buddy = java_buddy diff --git a/tests/integrations/test_db_integrations_sql.py b/tests/integrations/test_db_integrations_sql.py index 1845ebb474..43934bcb86 100644 --- a/tests/integrations/test_db_integrations_sql.py +++ b/tests/integrations/test_db_integrations_sql.py @@ -1,7 +1,6 @@ # Unless explicitly stated otherwise all files in this repository are licensed under the the Apache License Version 2.0. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. -import json from utils import context, bug, missing_feature, irrelevant, scenarios, features from utils.tools import logger @@ -11,10 +10,11 @@ class _BaseDatadogDbIntegrationTestClass(BaseDbIntegrationsTestClass): """Verify basic DB operations over different databases. - Check integration spans status: https://docs.google.com/spreadsheets/d/1qm3B0tJ-gG11j_MHoEd9iMXf4_DvWAGCLwmBhWCxbA8/edit#gid=623219645""" + Check integration spans status: https://docs.google.com/spreadsheets/d/1qm3B0tJ-gG11j_MHoEd9iMXf4_DvWAGCLwmBhWCxbA8/edit#gid=623219645 + """ def get_spans(self, excluded_operations=(), operations=None): - """get the spans from tracer and agent generated by all requests""" + """Get the spans from tracer and agent generated by all requests""" # yield the span from the tracer in first, as if it fails, there is a good chance that the one from the agent also fails for db_operation, request in self.get_requests(excluded_operations, operations=operations): @@ -52,7 +52,8 @@ def test_sql_success(self, excluded_operations=()): @irrelevant(library="python", reason="Python is using the correct span: db.system") def test_db_type(self, excluded_operations=()): """DEPRECATED!! Now it is db.system. An identifier for the database management system (DBMS) product being used. - Must be one of the available values: https://datadoghq.atlassian.net/wiki/spaces/APM/pages/2357395856/Span+attributes#db.system""" + Must be one of the available values: https://datadoghq.atlassian.net/wiki/spaces/APM/pages/2357395856/Span+attributes#db.system + """ for db_operation, span in self.get_spans(excluded_operations=excluded_operations): assert span["meta"]["db.type"] == self.db_service, f"Test is failing for {db_operation}" @@ -83,7 +84,8 @@ def test_runtime_id(self): @missing_feature(library="java", reason="not implemented yet") def test_db_system(self): """An identifier for the database management system (DBMS) product being used.
Formerly db.type - Must be one of the available values: https://datadoghq.atlassian.net/wiki/spaces/APM/pages/2357395856/Span+attributes#db.system""" + Must be one of the available values: https://datadoghq.atlassian.net/wiki/spaces/APM/pages/2357395856/Span+attributes#db.system + """ for db_operation, span in self.get_spans(): assert span["meta"]["db.system"] == self.db_service, f"Test is failing for {db_operation}" @@ -151,7 +153,8 @@ def test_db_sql_table(self): @missing_feature(library="java", reason="not implemented yet") def test_db_row_count(self): """The number of rows/results from the query or operation. For caches and other datastores. - This tag should only set for operations that retrieve stored data, such as GET operations and queries, excluding SET and other commands not returning data.""" + This tag should only be set for operations that retrieve stored data, such as GET operations and queries, excluding SET and other commands not returning data. + """ for _, span in self.get_spans(operations=["select"]): assert span["meta"]["db.row_count"] > 0, "Test is failing for select" @@ -270,7 +273,8 @@ class Test_MsSql(_BaseDatadogDbIntegrationTestClass): @missing_feature(library="nodejs", reason="Not implemented yet") def test_db_mssql_instance_name(self): """The Microsoft SQL Server instance name connecting to. This name is used to determine the port of a named instance. - This value should be set only if it's specified on the mssql connection string.""" + This value should be set only if it's specified on the mssql connection string. + """ for db_operation, span in self.get_spans(): assert span["meta"][ diff --git a/tests/integrations/test_dsm.py b/tests/integrations/test_dsm.py index 7692ded667..669ccace1e 100644 --- a/tests/integrations/test_dsm.py +++ b/tests/integrations/test_dsm.py @@ -4,13 +4,9 @@ import base64 import json +import os -from tests.integrations.utils import ( - compute_dsm_hash, - delete_sqs_queue, - delete_kinesis_stream, - delete_sns_topic, -) +from tests.integrations.utils import compute_dsm_hash from utils import weblog, interfaces, scenarios, irrelevant, context, bug, features, missing_feature, flaky from utils.tools import logger @@ -23,6 +19,16 @@ DSM_EXCHANGE = "dsm-system-tests-exchange" DSM_ROUTING_KEY = "dsm-system-tests-routing-key" +# AWS Specific +AWS_HOST = os.getenv("SYSTEM_TESTS_AWS_URL", "") + +# TODO: check the runner env for a system-tests AWS env var indicating whether AWS tests are local or remote + +# If the AWS host points to localstack, we are using local AWS mocking, else assume the real account +LOCAL_AWS_ACCT = "000000000000" # if 'localstack' in AWS_HOST else "601427279990" +AWS_ACCT = LOCAL_AWS_ACCT # if 'localstack' in AWS_HOST else "601427279990" +AWS_TESTING = "local" if LOCAL_AWS_ACCT == AWS_ACCT else "remote" # AWS Kinesis Specific DSM_STREAM = "dsm-system-tests-stream" @@ -263,28 +269,23 @@ def test_dsm_rabbitmq(self): @features.datastreams_monitoring_support_for_sqs -@irrelevant(True, reason="AWS Tests are not currently stable.") @scenarios.integrations_aws class Test_DsmSQS: """Verify DSM stats points for AWS Sqs Service""" def setup_dsm_sqs(self): - try: - message = get_message("Test_DsmSQS", "sqs") - - # we can't add the time hash to node since we can't replicate the hashing algo in python and compute a hash, - # which changes for each run with the time stamp added - if context.library.library != "nodejs": - self.queue = f"{DSM_QUEUE}_{context.library.library}_{WEBLOG_VARIANT_SANITIZED}_{scenarios.integrations_aws.unique_id}" - else:
self.queue = f"{DSM_QUEUE}_{context.library.library}" - - self.r = weblog.get( - f"/dsm?integration=sqs&timeout=60&queue={self.queue}&message={message}", timeout=DSM_REQUEST_TIMEOUT - ) - finally: - if context.library.library != "nodejs": - delete_sqs_queue(self.queue) + message = get_message("Test_DsmSQS", "sqs") + + # we can't add the time hash to node since we can't replicate the hashing algo in python and compute a hash, + # which changes for each run with the time stamp added + if context.library.library != "nodejs": + self.queue = f"{DSM_QUEUE}_{context.library.library}_{WEBLOG_VARIANT_SANITIZED}_{scenarios.integrations_aws.unique_id}" + else: + self.queue = f"{DSM_QUEUE}_{context.library.library}" + + self.r = weblog.get( + f"/dsm?integration=sqs&timeout=60&queue={self.queue}&message={message}", timeout=DSM_REQUEST_TIMEOUT + ) def test_dsm_sqs(self): assert self.r.text == "ok" @@ -317,37 +318,31 @@ def test_dsm_sqs(self): @features.datastreams_monitoring_support_for_sns -@irrelevant(True, reason="AWS Tests are not currently stable.") @scenarios.integrations_aws class Test_DsmSNS: """Verify DSM stats points for AWS SNS Service""" def setup_dsm_sns(self): - try: - message = get_message("Test_DsmSNS", "sns") - - # we can't add the time hash to node since we can't replicate the hashing algo in python and compute a hash, - # which changes for each run with the time stamp added - if context.library.library != "nodejs": - self.topic = f"{DSM_TOPIC}_{context.library.library}_{WEBLOG_VARIANT_SANITIZED}_{scenarios.integrations_aws.unique_id}_raw" - self.queue = f"{DSM_QUEUE_SNS}_{context.library.library}_{WEBLOG_VARIANT_SANITIZED}_{scenarios.integrations_aws.unique_id}_raw" - else: - self.topic = f"{DSM_TOPIC}_{context.library.library}_raw" - self.queue = f"{DSM_QUEUE_SNS}_{context.library.library}_raw" - - self.r = weblog.get( - f"/dsm?integration=sns&timeout=60&queue={self.queue}&topic={self.topic}&message={message}", - timeout=DSM_REQUEST_TIMEOUT, - ) - finally: - if context.library.library != "nodejs": - delete_sns_topic(self.topic) - delete_sqs_queue(self.queue) + message = get_message("Test_DsmSNS", "sns") + + # we can't add the time hash to node since we can't replicate the hashing algo in python and compute a hash, + # which changes for each run with the time stamp added + if context.library.library != "nodejs": + self.topic = f"{DSM_TOPIC}_{context.library.library}_{WEBLOG_VARIANT_SANITIZED}_{scenarios.integrations_aws.unique_id}_raw" + self.queue = f"{DSM_QUEUE_SNS}_{context.library.library}_{WEBLOG_VARIANT_SANITIZED}_{scenarios.integrations_aws.unique_id}_raw" + else: + self.topic = f"{DSM_TOPIC}_{context.library.library}_raw" + self.queue = f"{DSM_QUEUE_SNS}_{context.library.library}_raw" + + self.r = weblog.get( + f"/dsm?integration=sns&timeout=60&queue={self.queue}&topic={self.topic}&message={message}", + timeout=DSM_REQUEST_TIMEOUT, + ) def test_dsm_sns(self): assert self.r.text == "ok" - topic = self.topic if context.library.library == "java" else f"arn:aws:sns:us-east-1:601427279990:{self.topic}" + topic = self.topic if context.library.library == "java" else f"arn:aws:sns:us-east-1:{AWS_ACCT}:{self.topic}" hash_inputs = { "default": { @@ -355,8 +350,8 @@ def test_dsm_sns(self): "tags_in": ("direction:in", f"topic:{self.queue}", "type:sqs"), }, "nodejs": { - "producer": 15466202493380574985, - "consumer": 9372735371403270535, + "producer": 15466202493380574985 if AWS_TESTING == "remote" else 3703335291192845713, + "consumer": 9372735371403270535 if AWS_TESTING == "remote" else 
797339341876345963, "tags_out": ("direction:out", f"topic:{topic}", "type:sns"), "tags_in": ("direction:in", f"topic:{self.queue}", "type:sqs"), }, @@ -377,35 +372,30 @@ def test_dsm_sns(self): @features.datastreams_monitoring_support_for_kinesis -@irrelevant(True, reason="AWS Tests are not currently stable.") @scenarios.integrations_aws class Test_DsmKinesis: """Verify DSM stats points for AWS Kinesis Service""" def setup_dsm_kinesis(self): - try: - message = get_message("Test_DsmKinesis", "kinesis") - - # we can't add the time hash to node since we can't replicate the hashing algo in python and compute a hash, - # which changes for each run with the time stamp added - if context.library.library != "nodejs": - self.stream = f"{DSM_STREAM}_{context.library.library}_{WEBLOG_VARIANT_SANITIZED}_{scenarios.integrations_aws.unique_id}" - else: - self.stream = f"{DSM_STREAM}_{context.library.library}" - - self.r = weblog.get( - f"/dsm?integration=kinesis&timeout=60&stream={self.stream}&message={message}", - timeout=DSM_REQUEST_TIMEOUT, - ) - finally: - if context.library.library != "nodejs": - delete_kinesis_stream(self.stream) + message = get_message("Test_DsmKinesis", "kinesis") + + # we can't add the time hash to node since we can't replicate the hashing algo in python and compute a hash, + # which changes for each run with the time stamp added + if context.library.library != "nodejs": + self.stream = f"{DSM_STREAM}_{context.library.library}_{WEBLOG_VARIANT_SANITIZED}_{scenarios.integrations_aws.unique_id}" + else: + self.stream = f"{DSM_STREAM}_{context.library.library}" + + self.r = weblog.get( + f"/dsm?integration=kinesis&timeout=60&stream={self.stream}&message={message}", + timeout=DSM_REQUEST_TIMEOUT, + ) @missing_feature(library="java", reason="DSM is not implemented for Java AWS Kinesis.") def test_dsm_kinesis(self): assert self.r.text == "ok" - stream_arn = f"arn:aws:kinesis:us-east-1:601427279990:stream/{self.stream}" + stream_arn = f"arn:aws:kinesis:us-east-1:{AWS_ACCT}:stream/{self.stream}" hash_inputs = { "default": { @@ -465,7 +455,7 @@ def test_dsmcontext_injection_base64(self): # assert that this is base64 assert base64.b64encode(base64.b64decode(encoded_pathway_b64)) == bytes(encoded_pathway_b64, "utf-8") - encoded_pathway = base64.b64decode(bytes(encoded_pathway_b64, "utf-8")) + base64.b64decode(bytes(encoded_pathway_b64, "utf-8")) # nodejs uses big endian, others use little endian _format = " int - """ - Core FNV hash algorithm used in FNV0 and FNV1. - """ + """Core FNV hash algorithm used in FNV0 and FNV1.""" hval = hval_init for byte in data: hval = (hval * fnv_prime) % fnv_size @@ -276,9 +273,7 @@ def fnv(data, hval_init, fnv_prime, fnv_size): def fnv1_64(data): # type: (bytes) -> int - """ - Returns the 64 bit FNV-1 hash value for the given data. 
- """ + """Returns the 64 bit FNV-1 hash value for the given data.""" return fnv(data, FNV1_64_INIT, FNV_64_PRIME, 2**64) @@ -296,8 +291,7 @@ def get_bytes(s): def sha_hash(checkpoint_string): if isinstance(checkpoint_string, str): checkpoint_string = checkpoint_string.encode("utf-8") - hash_obj = hashlib.md5(checkpoint_string).digest()[:8] - return hash_obj + return hashlib.md5(checkpoint_string).digest()[:8] def compute_dsm_hash_nodejs(parent_hash, edge_tags): diff --git a/tests/k8s_lib_injection/test_k8s_init_image_validator.py b/tests/k8s_lib_injection/test_k8s_init_image_validator.py index a4836b69dc..910c26d416 100644 --- a/tests/k8s_lib_injection/test_k8s_init_image_validator.py +++ b/tests/k8s_lib_injection/test_k8s_init_image_validator.py @@ -1,29 +1,26 @@ -import os -import time - import requests -from utils import scenarios, features +from utils import scenarios, features, context, bug from utils.tools import logger -from utils import scenarios, context, features, bug from retry import retry class _BaseTestK8sInitImageValidator: """This test case validates the lib init image. It checks that the init image contains a correct package of the tracer. - We can use the tracer for instrument the weblog application. We use the dev test agent to check if the weblog is instrumented.""" + We can use the tracer for instrument the weblog application. We use the dev test agent to check if the weblog is instrumented. + """ @retry(delay=1, tries=10) def _get_dev_agent_traces(self): - logger.info(f"[Check traces] Checking traces:") - response = requests.get(f"http://localhost:8126/test/traces") + logger.info("[Check traces] Checking traces:") + response = requests.get("http://localhost:8126/test/traces") traces_json = response.json() assert traces_json is not None and len(traces_json) > 0, "No traces found" return traces_json @retry(delay=5, tries=20) def _check_weblog_running(self): - logger.info(f"[Check traces] Checking traces:") - response = requests.get(f"http://localhost:8080") + logger.info("[Check traces] Checking traces:") + response = requests.get("http://localhost:8080") assert response.status_code == 200, "Weblog not running" logger.info("Weblog is running") diff --git a/tests/k8s_lib_injection/test_k8s_lib_injection_djm.py b/tests/k8s_lib_injection/test_k8s_lib_injection_djm.py index 2596538a86..39444d1ce3 100644 --- a/tests/k8s_lib_injection/test_k8s_lib_injection_djm.py +++ b/tests/k8s_lib_injection/test_k8s_lib_injection_djm.py @@ -25,4 +25,4 @@ def test_spark_instrumented_with_ssi(self): f.write(json.dumps(spark_traces, indent=4)) assert len(spark_traces) > 0, "No Data Jobs Monitoring Spark application traces found" - logger.info(f"Test test_spark_instrumented_with_ssi finished") + logger.info("Test test_spark_instrumented_with_ssi finished") diff --git a/tests/k8s_lib_injection/test_k8s_lib_injection_profiling.py b/tests/k8s_lib_injection/test_k8s_lib_injection_profiling.py index 88d8bb60f2..93294003bf 100644 --- a/tests/k8s_lib_injection/test_k8s_lib_injection_profiling.py +++ b/tests/k8s_lib_injection/test_k8s_lib_injection_profiling.py @@ -10,7 +10,8 @@ class _TestK8sLibInjectionProfiling: def _check_profiling_request_sent(self, k8s_cluster_info, timeout=90): """Use test agent profiling endpoint to check if the profiling data has been sent by the injectect library. Checks the request made to the profiling endpoint (/profiling/v1/input). 
- The profiling post data can take between 12 and 90 seconds (12 if the library supports both env vars, 90 if it supports neither.""" + The profiling post data can take between 12 and 90 seconds (12 if the library supports both env vars, 90 if it supports neither). + """ mustend = time.time() + timeout while time.time() < mustend: response = requests.get( diff --git a/tests/otel/test_context_propagation.py b/tests/otel/test_context_propagation.py index 32d579e171..26f2779fa8 100644 --- a/tests/otel/test_context_propagation.py +++ b/tests/otel/test_context_propagation.py @@ -3,7 +3,7 @@ # Copyright 2024 Datadog, Inc. import json -from utils import weblog, interfaces, scenarios, features, incomplete_test_app +from utils import weblog, scenarios, features, incomplete_test_app @features.otel_propagators_api @@ -28,9 +28,6 @@ def test_propagation_extract(self): # assert content["baggage"] and not content["baggage"].isspace() def setup_propagation_inject(self): - inject_headers = { - "baggage": "foo=2", - } self.r = weblog.get("/otel_drop_in_default_propagator_inject") @incomplete_test_app(library="nodejs", reason="Node.js inject endpoint doesn't seem to be working.") diff --git a/tests/otel_tracing_e2e/test_e2e.py b/tests/otel_tracing_e2e/test_e2e.py index f5c31b4b1f..620d885f76 100644 --- a/tests/otel_tracing_e2e/test_e2e.py +++ b/tests/otel_tracing_e2e/test_e2e.py @@ -2,7 +2,7 @@ import os import time -from utils import context, weblog, interfaces, scenarios, irrelevant +from utils import context, weblog, interfaces, scenarios, irrelevant, features from utils.tools import logger, get_rid_from_request from utils.otel_validators.validator_trace import validate_all_traces from utils.otel_validators.validator_log import validate_log, validate_log_trace_correlation @@ -18,6 +18,7 @@ def _get_dd_trace_id(otel_trace_id: str, use_128_bits_trace_id: bool) -> int: @scenarios.otel_tracing_e2e @irrelevant(context.library != "java_otel") +@features.not_reported # FPD does not support otel libs class Test_OTelTracingE2E: def setup_main(self): self.use_128_bits_trace_id = False @@ -73,6 +74,7 @@ def test_main(self): @scenarios.otel_metric_e2e @irrelevant(context.library != "java_otel") +@features.not_reported # FPD does not support otel libs class Test_OTelMetricE2E: def setup_main(self): self.start = int(time.time()) @@ -139,6 +141,7 @@ def test_main(self): @scenarios.otel_log_e2e @irrelevant(context.library != "java_otel") +@features.not_reported # FPD does not support otel libs class Test_OTelLogE2E: def setup_main(self): self.r = weblog.get(path="/basic/log") diff --git a/tests/parametric/conftest.py b/tests/parametric/conftest.py index 82811a49c1..60b1c3bdce 100644 --- a/tests/parametric/conftest.py +++ b/tests/parametric/conftest.py @@ -1,4 +1,5 @@ import base64 +from collections.abc import Iterable import contextlib import dataclasses import os @@ -8,7 +9,7 @@ import time import datetime import hashlib -from typing import Dict, Generator, List, TextIO, TypedDict, Optional, Any +from typing import Generator, TextIO, TypedDict import urllib.parse import requests # type: ignore @@ -42,7 +43,7 @@ def test_id(request) -> str: class AgentRequest(TypedDict): method: str url: str - headers: Dict[str, str] + headers: dict[str, str] body: str @@ -65,7 +66,7 @@ def _request_token(request): @pytest.fixture -def library_env() -> Dict[str, str]: +def library_env() -> dict[str, str]: return {} @@ -77,7 +78,7 @@ def apm_test_server(request, library_env, test_id):
context.scenario.parametrized_tests_metadata[request.node.nodeid] = new_env new_env.update(apm_test_server_image.env) - yield dataclasses.replace( + return dataclasses.replace( apm_test_server_image, container_name=f"{apm_test_server_image.container_name}-{test_id}", env=new_env ) @@ -85,7 +86,7 @@ def apm_test_server(request, library_env, test_id): @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): outcome = yield - report = outcome.get_result() + outcome.get_result() @pytest.fixture @@ -111,9 +112,9 @@ def __init__(self, base_url: str, pytest_request: None): def _url(self, path: str) -> str: return urllib.parse.urljoin(self._base_url, path) - def _write_log(self, type, json_trace): + def _write_log(self, log_type, json_trace): with open(self.log_path, "a") as log: - log.write(f"\n{type}>>>>\n") + log.write(f"\n{log_type}>>>>\n") log.write(json.dumps(json_trace)) def traces(self, clear=False, **kwargs): @@ -131,15 +132,14 @@ def set_remote_config(self, path, payload): def get_remote_config(self): resp = self._session.get(self._url("/v0.7/config")) resp_json = resp.json() - list = [] + result = [] if resp_json and resp_json["target_files"]: target_files = resp_json["target_files"] for target in target_files: path = target["path"] msg = json.loads(str(base64.b64decode(target["raw"]), encoding="utf-8")) - dict = {"path": path, "msg": msg} - list.append(dict) - return list + result.append({"path": path, "msg": msg}) + return result def add_remote_config(self, path, payload): current_rc = self.get_remote_config() @@ -149,7 +149,7 @@ def add_remote_config(self, path, payload): assert resp.status_code == 202 @staticmethod - def _build_config_path_response(config: List): + def _build_config_path_response(config: list): expires_date = datetime.datetime.strftime( datetime.datetime.now() + datetime.timedelta(days=1), "%Y-%m-%dT%H:%M:%SZ" ) @@ -181,18 +181,18 @@ def _build_config_path_response(config: List): client_configs = [] target_files = [] targets_tmp = {} - for dict in config: - client_configs.append(dict["path"]) - dict["msg_enc"] = bytes(json.dumps(dict["msg"]), encoding="utf-8") + for item in config: + client_configs.append(item["path"]) + item["msg_enc"] = bytes(json.dumps(item["msg"]), encoding="utf-8") tf = { - "path": dict["path"], - "raw": str(base64.b64encode(dict["msg_enc"]), encoding="utf-8"), + "path": item["path"], + "raw": str(base64.b64encode(item["msg_enc"]), encoding="utf-8"), } target_files.append(tf) - targets_tmp[dict["path"]] = { + targets_tmp[item["path"]] = { "custom": {"c": [""], "v": 0}, - "hashes": {"sha256": hashlib.sha256(dict["msg_enc"]).hexdigest()}, - "length": len(dict["msg_enc"]), + "hashes": {"sha256": hashlib.sha256(item["msg_enc"]).hexdigest()}, + "length": len(item["msg_enc"]), } data = { @@ -241,7 +241,7 @@ def tracestats(self, **kwargs): self._write_log("tracestats", json) return json - def requests(self, **kwargs) -> List[AgentRequest]: + def requests(self, **kwargs) -> list[AgentRequest]: resp = self._session.get(self._url("/test/session/requests"), **kwargs) json = resp.json() self._write_log("requests", json) @@ -260,7 +260,7 @@ def get_tracer_flares(self, **kwargs): self._write_log("tracerflares", json) return json - def v06_stats_requests(self) -> List[AgentRequestV06Stats]: + def v06_stats_requests(self) -> list[AgentRequestV06Stats]: raw_requests = [r for r in self.requests() if "/v0.6/stats" in r["url"]] requests = [] for raw in raw_requests: @@ -310,7 +310,7 @@ def snapshot_context(self, token, ignores=None): def 
wait_for_num_traces( self, num: int, clear: bool = False, wait_loops: int = 30, sort_by_start: bool = True - ) -> List[Trace]: + ) -> list[Trace]: """Wait for `num` traces to be received from the test agent. Returns after the number of traces has been received or raises otherwise after 2 seconds of polling. @@ -342,7 +342,7 @@ def wait_for_num_traces( def wait_for_num_spans( self, num: int, clear: bool = False, wait_loops: int = 30, sort_by_start: bool = True - ) -> List[Trace]: + ) -> list[Trace]: """Wait for `num` spans to be received from the test agent. Returns after the number of spans has been received or raises otherwise after 2 seconds of polling. @@ -439,7 +439,7 @@ def wait_for_rc_apply_state( time.sleep(0.01) raise AssertionError("No RemoteConfig apply status found, got requests %r" % rc_reqs) - def wait_for_rc_capabilities(self, capabilities: List[int] = [], wait_loops: int = 100): + def wait_for_rc_capabilities(self, capabilities: Iterable[int] = (), wait_loops: int = 100): """Wait for the given RemoteConfig apply state to be received by the test agent.""" rc_reqs = [] capabilities_seen = set() @@ -470,7 +470,7 @@ def wait_for_rc_capabilities(self, capabilities: List[int] = [], wait_loops: int time.sleep(0.01) raise AssertionError("No RemoteConfig capabilities found, got capabilities %r" % capabilities_seen) - def wait_for_tracer_flare(self, case_id: Optional[str] = None, clear: bool = False, wait_loops: int = 100): + def wait_for_tracer_flare(self, case_id: str | None = None, clear: bool = False, wait_loops: int = 100): """Wait for the tracer-flare to be received by the test agent.""" for i in range(wait_loops): try: @@ -489,7 +489,7 @@ def wait_for_tracer_flare(self, case_id: Optional[str] = None, clear: bool = Fal @pytest.fixture(scope="session") -def docker() -> Optional[str]: +def docker() -> str | None: """Fixture to ensure docker is ready to use on the system.""" # Redirect output to /dev/null since we just care if we get a successful response code. r = subprocess.run( @@ -507,7 +507,7 @@ def docker() -> Optional[str]: return shutil.which("docker") -@pytest.fixture() +@pytest.fixture def docker_network(test_id: str) -> Generator[str, None, None]: network = scenarios.parametric.create_docker_network(test_id) @@ -520,12 +520,12 @@ def docker_network(test_id: str) -> Generator[str, None, None]: # It's possible (why?) to have some containers not stopped. # If it happens, failing here makes stdout tough to understand.
# Let's ignore this, later calls will clean the mess - pass + logger.info("Failed to remove network, ignoring the error") @pytest.fixture def test_agent_port() -> int: - """returns the port exposed inside the agent container""" + """Returns the port exposed inside the agent container""" return 8126 @@ -537,7 +537,7 @@ def test_agent_log_file(request) -> Generator[TextIO, None, None]: yield f f.seek(0) agent_output = "" - for line in f.readlines(): + for line in f: # Remove log lines that are not relevant to the test if "GET /test/session/traces" in line: continue @@ -548,7 +548,7 @@ def test_agent_log_file(request) -> Generator[TextIO, None, None]: if "GET /test/session/apmtelemetry" in line: continue agent_output += line - request.node._report_sections.append(("teardown", f"Test Agent Output", agent_output)) + request.node._report_sections.append(("teardown", "Test Agent Output", agent_output)) @pytest.fixture @@ -641,7 +641,7 @@ def test_library( "DD_TRACE_AGENT_URL": f"http://{test_agent_container_name}:{test_agent_port}", "DD_AGENT_HOST": test_agent_container_name, "DD_TRACE_AGENT_PORT": test_agent_port, - "APM_TEST_CLIENT_SERVER_PORT": apm_test_server.container_port, + "APM_TEST_CLIENT_SERVER_PORT": str(apm_test_server.container_port), "DD_TRACE_OTEL_ENABLED": "true", } for k, v in apm_test_server.env.items(): diff --git a/tests/parametric/test_128_bit_traceids.py b/tests/parametric/test_128_bit_traceids.py index e17ce71110..b127f3ecc2 100644 --- a/tests/parametric/test_128_bit_traceids.py +++ b/tests/parametric/test_128_bit_traceids.py @@ -422,7 +422,7 @@ def test_w3c_128_bit_propagation_tid_in_chunk_root(self, test_agent, test_librar """Ensure that root span contains the tid.""" with test_library: with test_library.dd_start_span(name="parent", service="service", resource="resource") as parent: - with test_library.dd_start_span(name="child", service="service", parent_id=parent.span_id) as child: + with test_library.dd_start_span(name="child", service="service", parent_id=parent.span_id): pass traces = test_agent.wait_for_num_traces(1, clear=True, sort_by_start=False) @@ -608,7 +608,7 @@ def check_128_bit_trace_id(header_trace_id, span_trace_id, dd_p_tid): def validate_dd_p_tid(dd_p_tid): """Validate that dd_p_tid is well-formed.""" - assert not dd_p_tid is None + assert dd_p_tid is not None assert len(dd_p_tid) == 16 assert dd_p_tid != ZERO16 assert dd_p_tid[8:16] == ZERO8 diff --git a/tests/parametric/test_config_consistency.py b/tests/parametric/test_config_consistency.py index eb66e3593e..fb4f77f172 100644 --- a/tests/parametric/test_config_consistency.py +++ b/tests/parametric/test_config_consistency.py @@ -1,11 +1,10 @@ -""" -Test configuration consistency for features across supported APM SDKs. -""" +"""Test configuration consistency for features across supported APM SDKs.""" from urllib.parse import urlparse import pytest -from utils import scenarios, features, context, missing_feature, irrelevant, flaky +from utils import scenarios, features, context, missing_feature, irrelevant, flaky, bug from utils.parametric.spec.trace import find_span_in_traces, find_only_span +import os parametrize = pytest.mark.parametrize @@ -126,8 +125,7 @@ def test_specific_env(self, library_env, test_agent, test_library): @scenarios.parametric @features.tracing_configuration_consistency class Test_Config_TraceAgentURL: - """ - DD_TRACE_AGENT_URL is validated using the tracer configuration. + """DD_TRACE_AGENT_URL is validated using the tracer configuration. 
This approach avoids the need to modify the setup file to create additional containers at the specified URL, which would be unnecessarily complex. """ @@ -232,13 +230,17 @@ def test_default_trace_rate_limit(self, library_env, test_agent, test_library): context.library == "php", reason="PHP backfill model does not support strict two-trace limit, see test below for its behavior", ) - @parametrize("library_env", [{"DD_TRACE_RATE_LIMIT": "1", "DD_TRACE_SAMPLE_RATE": "1"}]) + @parametrize( + "library_env", + [{"DD_TRACE_RATE_LIMIT": "1", "DD_TRACE_SAMPLE_RATE": "1", "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":1}]'}], + ) @flaky(library="java", reason="APMAPI-908") + @bug(context.library == "golang", reason="APMAPI-1030") def test_setting_trace_rate_limit_strict(self, library_env, test_agent, test_library): with test_library: - with test_library.dd_start_span(name="s1") as s1: + with test_library.dd_start_span(name="s1"): pass - with test_library.dd_start_span(name="s2") as s2: + with test_library.dd_start_span(name="s2"): pass traces = test_agent.wait_for_num_traces(2) @@ -251,9 +253,9 @@ def test_setting_trace_rate_limit_strict(self, library_env, test_agent, test_lib @parametrize("library_env", [{"DD_TRACE_RATE_LIMIT": "1"}]) def test_trace_rate_limit_without_trace_sample_rate(self, library_env, test_agent, test_library): with test_library: - with test_library.dd_start_span(name="s1") as s1: + with test_library.dd_start_span(name="s1"): pass - with test_library.dd_start_span(name="s2") as s2: + with test_library.dd_start_span(name="s2"): pass traces = test_agent.wait_for_num_traces(2) @@ -262,7 +264,10 @@ def test_trace_rate_limit_without_trace_sample_rate(self, library_env, test_agen assert trace_0_sampling_priority == 1 assert trace_1_sampling_priority == 1 - @parametrize("library_env", [{"DD_TRACE_RATE_LIMIT": "1", "DD_TRACE_SAMPLE_RATE": "1"}]) + @parametrize( + "library_env", + [{"DD_TRACE_RATE_LIMIT": "1", "DD_TRACE_SAMPLE_RATE": "1", "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":1}]'}], + ) def test_setting_trace_rate_limit(self, library_env, test_agent, test_library): # In PHP the rate limiter is continuously backfilled, i.e. if the rate limit is 2, and 0.2 seconds have passed, an allowance of 0.4 is backfilled. # As long as the amount of allowance is greater than zero, the request is allowed. @@ -272,7 +277,7 @@ def test_setting_trace_rate_limit(self, library_env, test_agent, test_library): with test_library: # Generate three traces to demonstrate rate limiting in PHP's backfill model for i in range(3): - with test_library.dd_start_span(name=f"s{i+1}") as span: + with test_library.dd_start_span(name=f"s{i+1}"): pass traces = test_agent.wait_for_num_traces(3) @@ -281,30 +286,36 @@ def test_setting_trace_rate_limit(self, library_env, test_agent, test_library): ), "Expected at least one trace to be rate-limited with sampling priority -1." 
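The PHP backfill behaviour described in the comments above is easier to see in isolation. The following is a minimal sketch, not part of the test harness or any tracer (the `BackfillRateLimiter` name is illustrative): allowance refills continuously at the configured rate, and a trace is kept while the allowance is still greater than zero.

    import time

    class BackfillRateLimiter:
        """Hypothetical sketch of a continuously backfilled rate limiter."""

        def __init__(self, rate: float):
            self.rate = rate              # traces allowed per second
            self.allowance = float(rate)  # start with a full allowance
            self.last = time.monotonic()

        def allow(self) -> bool:
            now = time.monotonic()
            # Backfill proportionally to elapsed time, capped at the configured rate.
            self.allowance = min(self.rate, self.allowance + (now - self.last) * self.rate)
            self.last = now
            if self.allowance > 0:  # "greater than zero" rule from the comment above
                self.allowance -= 1.0
                return True
            return False

Under this model, three back-to-back calls with rate=1 let the first requests through and reject at least one, which matches the "at least one trace to be rate-limited" assertion above.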
-def tag_scenarios(): - env1: dict = {"DD_TAGS": "key1:value1,key2:value2"} - env2: dict = {"DD_TAGS": "key1:value1 key2:value2"} - env3: dict = {"DD_TAGS": "env:test aKey:aVal bKey:bVal cKey:"} - env4: dict = {"DD_TAGS": "env:test,aKey:aVal,bKey:bVal,cKey:"} - env5: dict = {"DD_TAGS": "env:test,aKey:aVal bKey:bVal cKey:"} - env6: dict = {"DD_TAGS": "env:test bKey :bVal dKey: dVal cKey:"} - env7: dict = {"DD_TAGS": "env :test, aKey : aVal bKey:bVal cKey:"} - env8: dict = {"DD_TAGS": "env:keyWithA:Semicolon bKey:bVal cKey"} - env9: dict = {"DD_TAGS": "env:keyWith: , , Lots:Of:Semicolons "} - env10: dict = {"DD_TAGS": "a:b,c,d"} - env11: dict = {"DD_TAGS": "a,1"} - env12: dict = {"DD_TAGS": "a:b:c:d"} - return parametrize("library_env", [env1, env2, env3, env4, env5, env6, env7, env8, env9, env10, env11, env12]) +tag_scenarios: dict = { + "key1:value1,key2:value2": [("key1", "value1"), ("key2", "value2")], + "key1:value1 key2:value2": [("key1", "value1"), ("key2", "value2")], + "env:test aKey:aVal bKey:bVal cKey:": [("env", "test"), ("aKey", "aVal"), ("bKey", "bVal"), ("cKey", "")], + "env:test,aKey:aVal,bKey:bVal,cKey:": [("env", "test"), ("aKey", "aVal"), ("bKey", "bVal"), ("cKey", "")], + "env:test,aKey:aVal bKey:bVal cKey:": [("env", "test"), ("aKey", "aVal bKey:bVal cKey:")], + "env:test bKey :bVal dKey: dVal cKey:": [ + ("env", "test"), + ("bKey", ""), + ("dKey", ""), + ("dVal", ""), + ("cKey", ""), + ], + "env :test, aKey : aVal bKey:bVal cKey:": [("env", "test"), ("aKey", "aVal bKey:bVal cKey:")], + "env:keyWithA:Semicolon bKey:bVal cKey": [("env", "keyWithA:Semicolon"), ("bKey", "bVal"), ("cKey", "")], + "env:keyWith: , , Lots:Of:Semicolons ": [("env", "keyWith:"), ("Lots", "Of:Semicolons")], + "a:b,c,d": [("a", "b"), ("c", ""), ("d", "")], + "a,1": [("a", ""), ("1", "")], + "a:b:c:d": [("a", "b:c:d")], +} @scenarios.parametric @features.tracing_configuration_consistency class Test_Config_Tags: - @tag_scenarios() + @parametrize("library_env", [{"DD_TAGS": key} for key in tag_scenarios]) def test_comma_space_tag_separation(self, library_env, test_agent, test_library): expected_local_tags = [] if "DD_TAGS" in library_env: - expected_local_tags = _parse_dd_tags(library_env["DD_TAGS"]) + expected_local_tags = tag_scenarios[library_env["DD_TAGS"]] with test_library: with test_library.dd_start_span(name="sample_span"): pass @@ -336,20 +347,6 @@ def test_dd_service_override(self, library_env, test_agent, test_library): assert span["meta"]["version"] == "5.2.0" -def _parse_dd_tags(tags): - result = [] - key_value_pairs = tags.split(",") if "," in tags else tags.split() # First try to split by comma, then by space - for pair in key_value_pairs: - if ":" in pair: - key, value = pair.split(":", 1) - else: - key, value = pair, "" - key, value = key.strip(), value.strip() - if key: - result.append((key, value)) - return result - - @scenarios.parametric @features.tracing_configuration_consistency class Test_Config_Dogstatsd: @@ -379,3 +376,38 @@ def test_dogstatsd_custom_port(self, library_env, test_agent, test_library): with test_library as t: resp = t.config() assert resp["dd_dogstatsd_port"] == "8150" + + +@scenarios.parametric +@features.stable_configuration_support +class Test_Stable_Config_Default: + """Verify that stable config works as intended""" + + @missing_feature( + context.library in ["ruby", "cpp", "dotnet", "golang", "java", "nodejs", "php", "python"], + reason="does not support stable configurations yet", + ) + @pytest.mark.parametrize("library_env", [{"STABLE_CONFIG_SELECTOR": 
"true", "DD_SERVICE": "not-my-service"}]) + def test_config_stable(self, library_env, test_agent, test_library): + path = "/etc/datadog-agent/managed/datadog-apm-libraries/stable/libraries_config.yaml" + stable_config = """ +rules: + - selectors: + - origin: environment_variables + matches: + - STABLE_CONFIG_SELECTOR=true + operator: equals + configuration: + DD_SERVICE: my-service +""" + + with test_library: + success, message = test_library.container_exec_run( + f"bash -c \"mkdir -p {os.path.dirname(path)} && printf '{stable_config}' | tee {path}\"" + ) + assert success, message + test_library.container_restart() + config = test_library.config() + assert ( + config["dd_service"] == "my-service" + ), f"Service name is '{config["dd_service"]}' instead of 'my-service'" diff --git a/tests/parametric/test_crashtracking.py b/tests/parametric/test_crashtracking.py index 249fcd2b53..8e9d3d349c 100644 --- a/tests/parametric/test_crashtracking.py +++ b/tests/parametric/test_crashtracking.py @@ -1,12 +1,10 @@ -""" -Test the crashtracking (RC) feature of the APM libraries. -""" +"""Test the crashtracking (RC) feature of the APM libraries.""" import pytest import json import base64 -from utils import bug, context, features, irrelevant, missing_feature, rfc, scenarios, flaky +from utils import bug, context, features, scenarios @scenarios.parametric @@ -18,7 +16,7 @@ def test_report_crash(self, test_agent, test_library): test_library.crash() event = test_agent.wait_for_telemetry_event("logs", wait_loops=400) - assert self.is_crash_report(test_library, event) + self.assert_crash_report(test_library, event) @pytest.mark.parametrize("library_env", [{"DD_CRASHTRACKING_ENABLED": "false"}]) def test_disable_crashtracking(self, test_agent, test_library): @@ -30,7 +28,8 @@ def test_disable_crashtracking(self, test_agent, test_library): event = json.loads(base64.b64decode(req["body"])) if event["request_type"] == "logs": - assert self.is_crash_report(test_library, event) is False + with pytest.raises(AssertionError): + self.assert_crash_report(test_library, event) @bug(library="java", reason="APMLP-302") @pytest.mark.parametrize("library_env", [{"DD_CRASHTRACKING_ENABLED": "true"}]) @@ -45,15 +44,11 @@ def test_telemetry_timeout(self, test_agent, test_library, apm_test_server): finally: test_agent.set_trace_delay(0) - def is_crash_report(self, test_library, event) -> bool: - if not isinstance(event.get("payload"), list): - return False - if not event["payload"]: - return False - if not isinstance(event["payload"][0], dict): - return False - if "tags" not in event["payload"][0]: - return False + def assert_crash_report(self, test_library, event): + assert isinstance(event.get("payload"), list), event.get("payload") + assert event["payload"], event["payload"] + assert isinstance(event["payload"][0], dict), event["payload"][0] + assert "tags" in event["payload"][0] tags = event["payload"][0]["tags"] print("tags: ", tags) @@ -64,6 +59,7 @@ def is_crash_report(self, test_library, event) -> bool: # Most client libraries are using libdatadog so tesing signum tag would work, # but Java isn't so we end up with testing for severity tag. 
if test_library.lang == "java": - return "severity" in tags_dict and tags_dict["severity"] == "crash" - - return "signum" in tags_dict + assert "severity" in tags_dict and tags_dict["severity"] == "crash", tags_dict + else: + # According to the RFC, si_signo should be set to 11 for SIGSEGV + assert "signum" in tags_dict or ("si_signo" in tags_dict and tags_dict["si_signo"] == "11"), tags_dict diff --git a/tests/parametric/test_dynamic_configuration.py b/tests/parametric/test_dynamic_configuration.py index 67362f04a8..86b90a41f0 100644 --- a/tests/parametric/test_dynamic_configuration.py +++ b/tests/parametric/test_dynamic_configuration.py @@ -1,9 +1,7 @@ -""" -Test the dynamic configuration via Remote Config (RC) feature of the APM libraries. -""" +"""Test the dynamic configuration via Remote Config (RC) feature of the APM libraries.""" import json -from typing import Any, Dict, List +from typing import Any import pytest @@ -33,7 +31,7 @@ } -def send_and_wait_trace(test_library, test_agent, **span_kwargs) -> List[Span]: +def send_and_wait_trace(test_library, test_agent, **span_kwargs) -> list[Span]: with test_library.dd_start_span(**span_kwargs) as s1: pass test_library.dd_flush() @@ -41,7 +39,7 @@ def send_and_wait_trace(test_library, test_agent, **span_kwargs) -> List[Span]: return find_trace(traces, s1.trace_id) -def _default_config(service: str, env: str) -> Dict[str, Any]: +def _default_config(service: str, env: str) -> dict[str, Any]: return { "action": "enable", "revision": 1698167126064, @@ -61,21 +59,21 @@ def _default_config(service: str, env: str) -> Dict[str, Any]: } -def _set_rc(test_agent, config: Dict[str, Any]) -> None: +def _set_rc(test_agent, config: dict[str, Any]) -> None: cfg_id = hash(json.dumps(config)) config["id"] = str(cfg_id) test_agent.set_remote_config(path="datadog/2/APM_TRACING/%s/config" % cfg_id, payload=config) -def _create_rc_config(config_overrides: Dict[str, Any]) -> Dict: +def _create_rc_config(config_overrides: dict[str, Any]) -> dict: rc_config = _default_config(TEST_SERVICE, TEST_ENV) for k, v in config_overrides.items(): rc_config["lib_config"][k] = v return rc_config -def set_and_wait_rc(test_agent, config_overrides: Dict[str, Any]) -> Dict: +def set_and_wait_rc(test_agent, config_overrides: dict[str, Any]) -> dict: """Helper to create an RC configuration with the given settings and wait for it to be applied. It is assumed that the configuration is successfully applied. @@ -89,7 +87,7 @@ def set_and_wait_rc(test_agent, config_overrides: Dict[str, Any]) -> Dict: return test_agent.wait_for_rc_apply_state("APM_TRACING", state=RemoteConfigApplyState.ACKNOWLEDGED, clear=True) -def assert_sampling_rate(trace: List[Dict], rate: float): +def assert_sampling_rate(trace: list[dict], rate: float): """Asserts that a trace returned from the test agent is consistent with the given sample rate. This function assumes that all traces are sent to the agent regardless of sample rate. @@ -103,7 +101,7 @@ def assert_sampling_rate(trace: List[Dict], rate: float): assert span["metrics"].get("_dd.rule_psr", 1.0) == pytest.approx(rate) -def is_sampled(trace: List[Dict]): +def is_sampled(trace: list[dict]): """Asserts that a trace returned from the test agent is consistent with the given sample rate. This function assumes that all traces are sent to the agent regardless of sample rate. 
@@ -251,6 +249,7 @@ def test_trace_sampling_rate_override_default(self, test_agent, test_library): @parametrize("library_env", [{"DD_TRACE_SAMPLE_RATE": r, **DEFAULT_ENVVARS} for r in ["0.1", "1.0"]]) @bug(library="cpp", reason="APMAPI-863") @flaky(context.library >= "dotnet@2.56.0", reason="APMAPI-179") + @irrelevant(context.library == "python", reason="DD_TRACE_SAMPLE_RATE was removed in 3.x") def test_trace_sampling_rate_override_env(self, library_env, test_agent, test_library): """The RC sampling rate should override the environment variable. @@ -361,9 +360,7 @@ class TestDynamicConfigV1_EmptyServiceTargets: ], ) def test_not_match_service_target_empty_env(self, library_env, test_agent, test_library): - """ - Test that the library reports a non-erroneous apply_state when DD_SERVICE or DD_ENV are empty. - """ + """Test that the library reports a non-erroneous apply_state when DD_SERVICE or DD_ENV are empty.""" _set_rc(test_agent, _default_config(TEST_SERVICE, TEST_ENV)) rc_args = {} @@ -408,8 +405,7 @@ class TestDynamicConfigV1_ServiceTargets: @irrelevant(library="java", reason="APMAPI-1003") @irrelevant(library="cpp", reason="APMAPI-1003") def test_not_match_service_target(self, library_env, test_agent, test_library): - """ - This is an old behavior, see APMAPI-1003 + """This is an old behavior, see APMAPI-1003 ---- @@ -649,7 +645,7 @@ def test_trace_sampling_rules_override_rate(self, library_env, test_agent, test_ assert span["meta"]["_dd.p.dm"] == "-3" # Unset RC to ensure local settings - set_and_wait_rc(test_agent, config_overrides={"tracing_sampling_rules": None, "tracing_sampling_rules": None}) + set_and_wait_rc(test_agent, config_overrides={"tracing_sampling_rules": None}) trace = get_sampled_trace(test_library, test_agent, service="other_service", name="op_name") assert_sampling_rate(trace, DEFAULT_SAMPLE_RATE) diff --git a/tests/parametric/test_headers_b3.py b/tests/parametric/test_headers_b3.py index 38126b7c2f..e2754cafd2 100644 --- a/tests/parametric/test_headers_b3.py +++ b/tests/parametric/test_headers_b3.py @@ -5,7 +5,7 @@ from utils.parametric.spec.trace import SAMPLING_PRIORITY_KEY, ORIGIN from utils.parametric.spec.trace import span_has_no_parent from utils.parametric.spec.trace import find_only_span -from utils import missing_feature, context, scenarios, features, bug, irrelevant +from utils import missing_feature, context, scenarios, features, irrelevant from utils.tools import logger parametrize = pytest.mark.parametrize diff --git a/tests/parametric/test_headers_b3multi.py b/tests/parametric/test_headers_b3multi.py index bec18f6d52..9bc60b29d1 100644 --- a/tests/parametric/test_headers_b3multi.py +++ b/tests/parametric/test_headers_b3multi.py @@ -5,7 +5,7 @@ from utils.parametric.spec.trace import SAMPLING_PRIORITY_KEY, ORIGIN from utils.parametric.spec.trace import span_has_no_parent from utils.parametric.spec.trace import find_only_span -from utils import missing_feature, irrelevant, context, scenarios, features, bug +from utils import missing_feature, context, scenarios, features parametrize = pytest.mark.parametrize @@ -45,7 +45,7 @@ def test_headers_b3multi_extract_valid(self, test_agent, test_library): and activated properly. 
""" with test_library: - headers = test_library.dd_make_child_span_and_get_headers( + test_library.dd_make_child_span_and_get_headers( [ ["x-b3-traceid", "000000000000000000000000075bcd15"], ["x-b3-spanid", "000000003ade68b1"], @@ -63,7 +63,7 @@ def test_headers_b3multi_extract_valid(self, test_agent, test_library): def test_headers_b3multi_extract_invalid(self, test_agent, test_library): """Ensure that invalid b3multi distributed tracing headers are not extracted.""" with test_library: - headers = test_library.dd_make_child_span_and_get_headers( + test_library.dd_make_child_span_and_get_headers( [["x-b3-traceid", "0"], ["x-b3-spanid", "0"], ["x-b3-sampled", "1"]] ) diff --git a/tests/parametric/test_headers_baggage.py b/tests/parametric/test_headers_baggage.py index 04d88b3186..c4d8e56a14 100644 --- a/tests/parametric/test_headers_baggage.py +++ b/tests/parametric/test_headers_baggage.py @@ -1,14 +1,9 @@ -from operator import le -from py import test -from requests import head # type: ignore -from utils.parametric.spec.trace import SAMPLING_PRIORITY_KEY, ORIGIN -from utils.parametric.spec.trace import span_has_no_parent +from utils._decorators import irrelevant from utils.parametric.spec.trace import find_only_span -from utils import features, scenarios, bug, context +from utils import features, scenarios, context, missing_feature + from typing import Any import pytest -from utils.parametric.spec.trace import SAMPLING_PRIORITY_KEY, ORIGIN -from utils.parametric.spec.trace import find_only_span parametrize = pytest.mark.parametrize @@ -40,10 +35,11 @@ def test_headers_baggage_default_D001(self, test_agent, test_library): span = find_only_span(test_agent.wait_for_num_traces(1)) assert span.get("trace_id") == 123456789 assert span.get("parent_id") == 987654321 - assert "baggage" in headers.keys() + assert "baggage" in headers assert headers["baggage"] == "foo=bar" @only_baggage_enabled() + @missing_feature(context.library == "nodejs", reason="pausing on this feature to avoid app crashes") def test_headers_baggage_only_D002(self, test_library): """Ensure that only baggage headers are injected when baggage is the only enabled propagation style.""" with test_library: @@ -51,11 +47,15 @@ def test_headers_baggage_only_D002(self, test_library): [["x-datadog-trace-id", "123456789"], ["baggage", "foo=bar"]] ) - assert "x-datadog-trace-id" not in headers.keys() - assert "x-datadog-parent-id" not in headers.keys() - assert "baggage" in headers.keys() + assert "x-datadog-trace-id" not in headers + assert "x-datadog-parent-id" not in headers + assert "baggage" in headers assert headers["baggage"] == "foo=bar" + @irrelevant( + context.library in ("cpp", "goland", "java", "ruby", "php"), + reason="The current default behaviour matches the future baggage disabled behaviour, so we can't activate this test without causing a false easy win", + ) @disable_baggage() def test_baggage_disable_settings_D003(self, test_agent, test_library): """Ensure that baggage headers are not injected when baggage is disabled and does not interfere with other headers.""" @@ -67,10 +67,10 @@ def test_baggage_disable_settings_D003(self, test_agent, test_library): span = find_only_span(test_agent.wait_for_num_traces(1)) assert span.get("trace_id") == 123456789 assert span.get("parent_id") == 987654321 - assert "baggage" not in headers.keys() + assert "baggage" not in headers def test_baggage_inject_header_D004(self, test_library): - """testing baggage header injection, proper concatenation of key value pairs, and encoding""" + 
"""Testing baggage header injection, proper concatenation of key value pairs, and encoding""" with test_library.dd_start_span(name="test_baggage_set_D004") as span: span.set_baggage("foo", "bar") span.set_baggage("baz", "qux") @@ -90,8 +90,11 @@ def test_baggage_inject_header_D004(self, test_library): assert "serverNode=DF%2028" in baggage_items assert "%22%2C%3B%5C%28%29%2F%3A%3C%3D%3E%3F%40%5B%5D%7B%7D=%22%2C%3B%5C" in baggage_items + @missing_feature( + context.library == "nodejs", reason="`dd_extract_headers_and_make_child_span` does not work with only baggage" + ) def test_baggage_extract_header_D005(self, test_library): - """testing baggage header extraction and decoding""" + """Testing baggage header extraction and decoding""" with test_library.dd_extract_headers_and_make_child_span( "test_baggage_extract_header_D005", @@ -125,6 +128,10 @@ def test_baggage_set_D006(self, test_library): assert span.get_baggage("userId") == "Amélie" assert span.get_baggage("serverNode") == "DF 28" + @irrelevant( + context.library in ("cpp", "goland", "java", "ruby", "php"), + reason="The current default behaviour matches the future baggage disabled behaviour, so we can't activate this test without causing a false easy win", + ) @disable_baggage() def test_baggage_set_disabled_D007(self, test_library): """Ensure that baggage headers are not injected when baggage is disabled.""" @@ -135,8 +142,11 @@ def test_baggage_set_disabled_D007(self, test_library): headers = test_library.dd_inject_headers(span.span_id) assert not any("baggage" in item for item in headers) + @missing_feature( + context.library == "nodejs", reason="`dd_extract_headers_and_make_child_span` does not work with only baggage" + ) def test_baggage_get_D008(self, test_library): - """testing baggage API get_baggage""" + """Testing baggage API get_baggage""" with test_library.dd_extract_headers_and_make_child_span( "test_baggage_get_D008", [["baggage", "userId=Am%C3%A9lie,serverNode=DF%2028"]] ) as span: @@ -147,8 +157,11 @@ def test_baggage_get_D008(self, test_library): assert span.get_baggage("userId") == "Amélie" assert span.get_baggage("serverNode") == "DF 28" + @missing_feature( + context.library == "nodejs", reason="`dd_extract_headers_and_make_child_span` does not work with only baggage" + ) def test_baggage_get_all_D009(self, test_library): - """testing baggage API get_all_baggage""" + """Testing baggage API get_all_baggage""" with test_library.dd_extract_headers_and_make_child_span( "test_baggage_get_all_D009", [["baggage", "foo=bar"]] ) as span: @@ -159,7 +172,7 @@ def test_baggage_get_all_D009(self, test_library): assert baggage == {"foo": "bar", "baz": "qux", "userId": "Amélie", "serverNode": "DF 28"} def test_baggage_remove_D010(self, test_library): - """testing baggage API remove_baggage""" + """Testing baggage API remove_baggage""" with test_library.dd_start_span(name="test_baggage_remove_D010") as span: span.set_baggage("baz", "qux") span.set_baggage("userId", "Amélie") @@ -171,42 +184,74 @@ def test_baggage_remove_D010(self, test_library): assert span.get_all_baggage() == {} def test_baggage_remove_all_D011(self, test_library): - """testing baggage API remove_all_baggage""" + """Testing baggage API remove_all_baggage""" with test_library.dd_start_span(name="test_baggage_remove_all_D011") as span: span.set_baggage("foo", "bar") span.set_baggage("baz", "qux") span.remove_all_baggage() assert span.get_all_baggage() == {} - def test_baggage_malformed_headers_D012(self, test_library, test_agent): + def _assert_valid_baggage(self, 
test_library): + """Helper function to confirm that a valid baggage header is set + when calling dd_make_child_span_and_get_headers. + """ + with test_library: + headers = test_library.dd_make_child_span_and_get_headers([["baggage", "foo=valid"]]) + assert "baggage" in headers + + @missing_feature( + context.library == "nodejs", + reason="`dd_make_child_span_and_get_headers` calls `dd_extract_headers_and_make_child_span`, which does not work with only baggage", + ) + def test_baggage_malformed_headers_D012(self, test_library): """Ensure that malformed baggage headers are handled properly. Unable to use get_baggage functions because it does not return anything""" + Test_Headers_Baggage._assert_valid_baggage(self, test_library) + with test_library: headers = test_library.dd_make_child_span_and_get_headers( [["baggage", "no-equal-sign,foo=gets-dropped-because-previous-pair-is-malformed"]], ) - assert "baggage" not in headers.keys() + assert "baggage" not in headers + @missing_feature( + context.library == "nodejs", + reason="`dd_make_child_span_and_get_headers` calls `dd_extract_headers_and_make_child_span`, which does not work with only baggage", + ) def test_baggage_malformed_headers_D013(self, test_library): """Ensure that malformed baggage headers are handled properly. Unable to use get_baggage functions because it does not return anything""" + Test_Headers_Baggage._assert_valid_baggage(self, test_library) + with test_library: headers = test_library.dd_make_child_span_and_get_headers([["baggage", "=no-key"]]) - assert "baggage" not in headers.keys() + assert "baggage" not in headers + @missing_feature( + context.library == "nodejs", + reason="`dd_make_child_span_and_get_headers` calls `dd_extract_headers_and_make_child_span`, which does not work with only baggage", + ) def test_baggage_malformed_headers_D014(self, test_library): + Test_Headers_Baggage._assert_valid_baggage(self, test_library) + with test_library: headers = test_library.dd_make_child_span_and_get_headers([["baggage", "no-value="]]) - assert "baggage" not in headers.keys() + assert "baggage" not in headers + @missing_feature( + context.library == "nodejs", + reason="`dd_make_child_span_and_get_headers` calls `dd_extract_headers_and_make_child_span`, which does not work with only baggage", + ) def test_baggage_malformed_headers_D015(self, test_library): + Test_Headers_Baggage._assert_valid_baggage(self, test_library) + with test_library: headers = test_library.dd_make_child_span_and_get_headers( [["baggage", "foo=gets-dropped-because-subsequent-pair-is-malformed,="]], ) - assert "baggage" not in headers.keys() + assert "baggage" not in headers def test_baggageheader_maxitems_inject_D016(self, test_library): """Ensure that baggage headers are not injected when the number of baggage items exceeds the maximum number of items.""" diff --git a/tests/parametric/test_headers_datadog.py b/tests/parametric/test_headers_datadog.py index 7a60fa0c4b..2c623fc8ac 100644 --- a/tests/parametric/test_headers_datadog.py +++ b/tests/parametric/test_headers_datadog.py @@ -1,7 +1,7 @@ from utils.parametric.spec.trace import SAMPLING_PRIORITY_KEY, ORIGIN from utils.parametric.spec.trace import span_has_no_parent from utils.parametric.spec.trace import find_only_span -from utils import features, scenarios, bug, context +from utils import features, scenarios @features.datadog_headers_propagation @@ -12,7 +12,7 @@ def test_distributed_headers_extract_datadog_D001(self, test_agent, test_library and activated properly. 
""" with test_library: - headers = test_library.dd_make_child_span_and_get_headers( + test_library.dd_make_child_span_and_get_headers( [ ["x-datadog-trace-id", "123456789"], ["x-datadog-parent-id", "987654321"], @@ -34,7 +34,7 @@ def test_distributed_headers_extract_datadog_D001(self, test_agent, test_library def test_distributed_headers_extract_datadog_invalid_D002(self, test_agent, test_library): """Ensure that invalid Datadog distributed tracing headers are not extracted.""" with test_library: - headers = test_library.dd_make_child_span_and_get_headers( + test_library.dd_make_child_span_and_get_headers( [ ["x-datadog-trace-id", "0"], ["x-datadog-parent-id", "0"], @@ -76,7 +76,7 @@ def test_distributed_headers_propagate_datadog_D004(self, test_agent, test_libra ], ) - span = find_only_span(test_agent.wait_for_num_traces(1)) + find_only_span(test_agent.wait_for_num_traces(1)) assert headers["x-datadog-trace-id"] == "123456789" assert headers["x-datadog-parent-id"] != "987654321" assert headers["x-datadog-sampling-priority"] == "2" diff --git a/tests/parametric/test_headers_none.py b/tests/parametric/test_headers_none.py index 3dea59228c..66e4aef3a5 100644 --- a/tests/parametric/test_headers_none.py +++ b/tests/parametric/test_headers_none.py @@ -4,7 +4,7 @@ from utils.parametric.spec.trace import SAMPLING_PRIORITY_KEY, ORIGIN from utils.parametric.spec.trace import find_only_span -from utils import missing_feature, context, scenarios, features, bug +from utils import scenarios, features parametrize = pytest.mark.parametrize diff --git a/tests/parametric/test_headers_precedence.py b/tests/parametric/test_headers_precedence.py index 814536ace0..19ed5d49bf 100644 --- a/tests/parametric/test_headers_precedence.py +++ b/tests/parametric/test_headers_precedence.py @@ -2,8 +2,7 @@ import pytest -from utils.parametric.spec.tracecontext import get_tracecontext, TRACECONTEXT_FLAGS_SET -from utils.parametric.spec.trace import retrieve_span_links, find_span_in_traces +from utils.parametric.spec.tracecontext import get_tracecontext from utils import bug, missing_feature, context, irrelevant, scenarios, features parametrize = pytest.mark.parametrize @@ -672,8 +671,7 @@ def test_headers_precedence_propagationstyle_tracecontext_first_extract_first_tr def _test_headers_precedence_propagationstyle_includes_tracecontext_correctly_propagates_tracestate( self, test_agent, test_library, prefer_tracecontext, extract_first ): - """ - This test asserts that ALL the propagators are executed in the specified + """This test asserts that ALL the propagators are executed in the specified order, and the the first propagator to extract a valid trace context determines the trace-id, parent-id, and supplemental information such as x-datadog-sampling-priority, x-datadog-tags, tracestate, etc. 
@@ -749,7 +747,7 @@ def _test_headers_precedence_propagationstyle_includes_tracecontext_correctly_pr ], ) - traces = test_agent.wait_for_num_traces(num=5) + test_agent.wait_for_num_traces(num=5) # 1) Datadog and tracecontext headers, trace-id and span-id match, tracestate is present # Note: This is expected to be the most frequent case diff --git a/tests/parametric/test_headers_tracecontext.py b/tests/parametric/test_headers_tracecontext.py index 0a5e5c63a2..65bfe215ed 100644 --- a/tests/parametric/test_headers_tracecontext.py +++ b/tests/parametric/test_headers_tracecontext.py @@ -38,8 +38,7 @@ def temporary_enable_optin_tracecontext_single_key() -> Any: class Test_Headers_Tracecontext: @temporary_enable_optin_tracecontext() def test_both_traceparent_and_tracestate_missing(self, test_agent, test_library): - """ - harness sends a request without traceparent or tracestate + """Harness sends a request without traceparent or tracestate expects a valid traceparent from the output header """ with test_library: @@ -50,8 +49,7 @@ def test_both_traceparent_and_tracestate_missing(self, test_agent, test_library) context.library == "ruby", reason="Propagators not configured for DD_TRACE_PROPAGATION_STYLE config" ) def test_single_key_traceparent_included_tracestate_missing(self, test_agent, test_library): - """ - harness sends a request with traceparent but without tracestate + """Harness sends a request with traceparent but without tracestate expects a valid traceparent from the output header, with the same trace_id but different parent_id """ with test_library: @@ -64,8 +62,7 @@ def test_single_key_traceparent_included_tracestate_missing(self, test_agent, te @temporary_enable_optin_tracecontext() def test_traceparent_included_tracestate_missing(self, test_agent, test_library): - """ - harness sends a request with traceparent but without tracestate + """Harness sends a request with traceparent but without tracestate expects a valid traceparent from the output header, with the same trace_id but different parent_id """ with test_library: @@ -99,8 +96,7 @@ def test_traceparent_included_tracestate_missing(self, test_agent, test_library) reason="the tracer should reject the incoming traceparent(s) when there are multiple traceparent headers", ) def test_traceparent_duplicated(self, test_agent, test_library): - """ - harness sends a request with two traceparent headers + """Harness sends a request with two traceparent headers expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -117,8 +113,7 @@ def test_traceparent_duplicated(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_header_name(self, test_agent, test_library): - """ - harness sends an invalid traceparent using wrong names + """Harness sends an invalid traceparent using wrong names expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -136,8 +131,7 @@ def test_traceparent_header_name(self, test_agent, test_library): @temporary_enable_optin_tracecontext() @missing_feature(context.library == "ruby", reason="Ruby doesn't support case-insensitive distributed headers") def test_traceparent_header_name_valid_casing(self, test_agent, test_library): - """ - harness sends a valid traceparent using different combination of casing + """Harness sends a valid traceparent using different combination of casing expects a valid traceparent from the output header """ with test_library: @@ -159,8 +153,7 @@ def 
test_traceparent_header_name_valid_casing(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_version_0x00(self, test_agent, test_library): - """ - harness sends an invalid traceparent with extra trailing characters + """Harness sends an invalid traceparent with extra trailing characters expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -183,8 +176,7 @@ def test_traceparent_version_0x00(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_version_0xcc(self, test_agent, test_library): - """ - harness sends an valid traceparent with future version 204 (0xcc) + """Harness sends a valid traceparent with future version 204 (0xcc) expects a valid traceparent from the output header with the same trace_id """ with test_library: @@ -218,8 +210,7 @@ def test_traceparent_version_0xcc(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_version_0xff(self, test_agent, test_library): - """ - harness sends an invalid traceparent with version 255 (0xff) + """Harness sends an invalid traceparent with version 255 (0xff) expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -231,8 +222,7 @@ def test_traceparent_version_0xff(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_version_illegal_characters(self, test_agent, test_library): - """ - harness sends an invalid traceparent with illegal characters in version + """Harness sends an invalid traceparent with illegal characters in version expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -249,8 +239,7 @@ def test_traceparent_version_illegal_characters(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_version_too_long(self, test_agent, test_library): - """ - harness sends an invalid traceparent with version more than 2 HEXDIG + """Harness sends an invalid traceparent with version more than 2 HEXDIG expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -267,8 +256,7 @@ def test_traceparent_version_too_long(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_version_too_short(self, test_agent, test_library): - """ - harness sends an invalid traceparent with version less than 2 HEXDIG + """Harness sends an invalid traceparent with version less than 2 HEXDIG expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -280,8 +268,7 @@ def test_traceparent_version_too_short(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_trace_id_all_zero(self, test_agent, test_library): - """ - harness sends an invalid traceparent with trace_id = 00000000000000000000000000000000 + """Harness sends an invalid traceparent with trace_id = 00000000000000000000000000000000 expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -293,8 +280,7 @@ def test_traceparent_trace_id_all_zero(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_trace_id_illegal_characters(self, test_agent, test_library): - """ - harness sends an invalid traceparent with illegal
characters in trace_id expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -311,8 +297,7 @@ def test_traceparent_trace_id_illegal_characters(self, test_agent, test_library) @temporary_enable_optin_tracecontext() def test_traceparent_trace_id_too_long(self, test_agent, test_library): - """ - harness sends an invalid traceparent with trace_id more than 32 HEXDIG + """Harness sends an invalid traceparent with trace_id more than 32 HEXDIG expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -326,8 +311,7 @@ def test_traceparent_trace_id_too_long(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_trace_id_too_short(self, test_agent, test_library): - """ - harness sends an invalid traceparent with trace_id less than 32 HEXDIG + """Harness sends an invalid traceparent with trace_id less than 32 HEXDIG expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -339,8 +323,7 @@ def test_traceparent_trace_id_too_short(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_parent_id_all_zero(self, test_agent, test_library): - """ - harness sends an invalid traceparent with parent_id = 0000000000000000 + """Harness sends an invalid traceparent with parent_id = 0000000000000000 expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -352,8 +335,7 @@ def test_traceparent_parent_id_all_zero(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_parent_id_illegal_characters(self, test_agent, test_library): - """ - harness sends an invalid traceparent with illegal characters in parent_id + """Harness sends an invalid traceparent with illegal characters in parent_id expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -370,8 +352,7 @@ def test_traceparent_parent_id_illegal_characters(self, test_agent, test_library @temporary_enable_optin_tracecontext() def test_traceparent_parent_id_too_long(self, test_agent, test_library): - """ - harness sends an invalid traceparent with parent_id more than 16 HEXDIG + """Harness sends an invalid traceparent with parent_id more than 16 HEXDIG expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -383,8 +364,7 @@ def test_traceparent_parent_id_too_long(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_parent_id_too_short(self, test_agent, test_library): - """ - harness sends an invalid traceparent with parent_id less than 16 HEXDIG + """Harness sends an invalid traceparent with parent_id less than 16 HEXDIG expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -396,8 +376,7 @@ def test_traceparent_parent_id_too_short(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_trace_flags_illegal_characters(self, test_agent, test_library): - """ - harness sends an invalid traceparent with illegal characters in trace_flags + """Harness sends an invalid traceparent with illegal characters in trace_flags expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -414,8 +393,7 @@ def test_traceparent_trace_flags_illegal_characters(self, test_agent, 
test_libra @temporary_enable_optin_tracecontext() def test_traceparent_trace_flags_too_long(self, test_agent, test_library): - """ - harness sends an invalid traceparent with trace_flags more than 2 HEXDIG + """Harness sends an invalid traceparent with trace_flags more than 2 HEXDIG expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -427,8 +405,7 @@ def test_traceparent_trace_flags_too_long(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_trace_flags_too_short(self, test_agent, test_library): - """ - harness sends an invalid traceparent with trace_flags less than 2 HEXDIG + """Harness sends an invalid traceparent with trace_flags less than 2 HEXDIG expects a valid traceparent from the output header, with a newly generated trace_id """ with test_library: @@ -440,8 +417,7 @@ def test_traceparent_trace_flags_too_short(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_traceparent_ows_handling(self, test_agent, test_library): - """ - harness sends an valid traceparent with heading and trailing OWS + """Harness sends a valid traceparent with leading and trailing OWS expects a valid traceparent from the output header """ with test_library: @@ -473,8 +449,7 @@ def test_traceparent_ows_handling(self, test_agent, test_library): @temporary_enable_optin_tracecontext() def test_tracestate_included_traceparent_missing(self, test_agent, test_library): - """ - harness sends a request with tracestate but without traceparent + """Harness sends a request with tracestate but without traceparent expects a valid traceparent from the output header expects the tracestate to be discarded """ @@ -488,8 +463,7 @@ def test_tracestate_included_traceparent_missing(self, test_agent, test_library) @temporary_enable_optin_tracecontext() def test_tracestate_included_traceparent_included(self, test_agent, test_library): - """ - harness sends a request with both tracestate and traceparent + """Harness sends a request with both tracestate and traceparent expects a valid traceparent from the output header with the same trace_id expects the tracestate to be inherited """ @@ -508,8 +482,7 @@ def test_tracestate_included_traceparent_included(self, test_agent, test_library @temporary_enable_optin_tracecontext() def test_tracestate_header_name(self, test_agent, test_library): - """ - harness sends an invalid tracestate using wrong names + """Harness sends an invalid tracestate using wrong names expects the tracestate to be discarded """ with test_library: @@ -535,8 +508,7 @@ def test_tracestate_header_name(self, test_agent, test_library): @temporary_enable_optin_tracecontext() @missing_feature(context.library == "ruby", reason="Ruby doesn't support case-insensitive distributed headers") def test_tracestate_header_name_valid_casing(self, test_agent, test_library): - """ - harness sends a valid tracestate using different combination of casing + """Harness sends a valid tracestate using different combination of casing expects the tracestate to be inherited """ with test_library: @@ -578,8 +550,7 @@ def test_tracestate_header_name_valid_casing(self, test_agent, test_library): reason="python does not reconcile duplicate http headers, if duplicate headers received one only one will be used", ) def test_tracestate_empty_header(self, test_agent, test_library): - """ - harness sends a request with empty tracestate header + """Harness sends a request with empty tracestate header expects the empty tracestate
to be discarded """ with test_library: @@ -634,8 +605,7 @@ def test_tracestate_empty_header(self, test_agent, test_library): reason="python does not reconcile duplicate http headers, if duplicate headers received one only one will be used", ) def test_tracestate_multiple_headers_different_keys(self, test_agent, test_library): - """ - harness sends a request with multiple tracestate headers, each contains different set of keys + """Harness sends a request with multiple tracestate headers, each contains different set of keys expects a combined tracestate """ with test_library: @@ -662,8 +632,7 @@ def test_tracestate_multiple_headers_different_keys(self, test_agent, test_libra @temporary_enable_optin_tracecontext() def test_tracestate_duplicated_keys(self, test_agent, test_library): - """ - harness sends a request with an invalid tracestate header with duplicated keys + """Harness sends a request with an invalid tracestate header with duplicated keys expects the tracestate to be inherited, and the duplicated keys to be either kept as-is or one of them to be discarded """ @@ -723,9 +692,7 @@ def test_tracestate_duplicated_keys(self, test_agent, test_library): @missing_feature(context.library < "ruby@2.0.0", reason="Not implemented") @missing_feature(context.library < "golang@1.64.0", reason="Not implemented") def test_tracestate_w3c_p_extract(self, test_agent, test_library): - """ - Ensure the last parent id tag is set according to the W3C Phase 2 spec - """ + """Ensure the last parent id tag is set according to the W3C Phase 2 spec""" with test_library: with test_library.dd_extract_headers_and_make_child_span( "p_set", @@ -768,9 +735,7 @@ def test_tracestate_w3c_p_extract(self, test_agent, test_library): @missing_feature(context.library < "ruby@2.0.0", reason="Not implemented") @missing_feature(context.library < "golang@1.64.0", reason="Not implemented") def test_tracestate_w3c_p_inject(self, test_agent, test_library): - """ - Ensure the last parent id is propagated according to the W3C spec - """ + """Ensure the last parent id is propagated according to the W3C spec""" with test_library: with test_library.dd_start_span(name="new_span") as span: headers = test_library.dd_inject_headers(span.span_id) @@ -780,7 +745,7 @@ def test_tracestate_w3c_p_inject(self, test_agent, test_library): tracestate = tracestate_headers[0][1] # FIXME: nodejs parametric app sets span.span_id to a string, convert this to an int - assert "p:{:016x}".format(int(span.span_id)) in tracestate + assert f"p:{int(span.span_id):016x}" in tracestate @missing_feature(context.library < "python@2.10.0", reason="Not implemented") @missing_feature(context.library == "dotnet", reason="Not implemented") @@ -792,9 +757,7 @@ def test_tracestate_w3c_p_inject(self, test_agent, test_library): @missing_feature(context.library < "golang@1.64.0", reason="Not implemented") @pytest.mark.parametrize("library_env", [{"DD_TRACE_PROPAGATION_STYLE": "datadog,tracecontext"}]) def test_tracestate_w3c_p_extract_datadog_w3c(self, test_agent, test_library): - """ - Ensure the last parent id tag is set according to the W3C phase 3 spec - """ + """Ensure the last parent id tag is set according to the W3C phase 3 spec""" with test_library: # 1) Trace ids and parent ids in datadog and tracecontext headers match with test_library.dd_extract_headers_and_make_child_span( @@ -908,9 +871,7 @@ def test_tracestate_w3c_p_extract_datadog_w3c(self, test_agent, test_library): @missing_feature(context.library == "cpp", reason="Not implemented")
@missing_feature(context.library == "php", reason="Not implemented") def test_tracestate_w3c_p_phase_3_extract_first(self, test_agent, test_library): - """ - Ensure the last parent id tag is not set when only Datadog headers are extracted - """ + """Ensure the last parent id tag is not set when only Datadog headers are extracted""" # 1) Datadog and tracecontext headers, parent ids do not match with test_library.dd_extract_headers_and_make_child_span( @@ -936,9 +897,7 @@ def test_tracestate_w3c_p_phase_3_extract_first(self, test_agent, test_library): @missing_feature(context.library < "java@1.36", reason="Not implemented") @pytest.mark.parametrize("library_env", [{"DD_TRACE_PROPAGATION_STYLE": "datadog,tracecontext"}]) def test_tracestate_w3c_context_leak(self, test_agent, test_library): - """ - Ensure high order bits do not leak between traces - """ + """Ensure high order bits do not leak between traces""" with test_library: with test_library.dd_extract_headers_and_make_child_span( "high_order_64_bits_set", @@ -972,12 +931,11 @@ def test_tracestate_w3c_context_leak(self, test_agent, test_library): ) assert case1["meta"].get("_dd.p.tid") == "3333333333333333" - assert case2["meta"].get("_dd.p.tid") == None + assert case2["meta"].get("_dd.p.tid") is None @temporary_enable_optin_tracecontext() def test_tracestate_all_allowed_characters(self, test_agent, test_library): - """ - harness sends a request with a valid tracestate header with all legal characters + """Harness sends a request with a valid tracestate header with all legal characters expects the tracestate to be inherited """ key_without_vendor = "".join( @@ -1020,8 +978,7 @@ def test_tracestate_all_allowed_characters(self, test_agent, test_library): context.library == "php", reason="PHP may preserve whitespace of foreign vendors trracestate (allowed per spec)" ) def test_tracestate_ows_handling(self, test_agent, test_library): - """ - harness sends a request with a valid tracestate header with OWS + """Harness sends a request with a valid tracestate header with OWS expects the tracestate to be inherited """ with test_library: diff --git a/tests/parametric/test_headers_tracestate_dd.py b/tests/parametric/test_headers_tracestate_dd.py index c8635926e4..dd17a6b331 100644 --- a/tests/parametric/test_headers_tracestate_dd.py +++ b/tests/parametric/test_headers_tracestate_dd.py @@ -21,8 +21,7 @@ def temporary_enable_propagationstyle_default() -> Any: class Test_Headers_Tracestate_DD: @temporary_enable_propagationstyle_default() def test_headers_tracestate_dd_propagate_samplingpriority(self, test_agent, test_library): - """ - harness sends a request with both tracestate and traceparent + """Harness sends a request with both tracestate and traceparent expects a valid traceparent from the output header with the same trace_id expects the tracestate to be inherited """ @@ -186,8 +185,7 @@ def test_headers_tracestate_dd_propagate_samplingpriority(self, test_agent, test @temporary_enable_propagationstyle_default() def test_headers_tracestate_dd_propagate_origin(self, test_agent, test_library): - """ - harness sends a request with both tracestate and traceparent + """Harness sends a request with both tracestate and traceparent expects a valid traceparent from the output header with the same trace_id expects the tracestate to be inherited """ @@ -319,8 +317,7 @@ def test_headers_tracestate_dd_propagate_origin(self, test_agent, test_library): @bug(context.library in ["python@2.7.2", "python@2.7.3"], reason="AIT-9945") @bug(context.library == "ruby", 
reason="APMAPI-812") def test_headers_tracestate_dd_propagate_propagatedtags(self, test_agent, test_library): - """ - harness sends a request with both tracestate and traceparent + """Harness sends a request with both tracestate and traceparent expects a valid traceparent from the output header with the same trace_id expects the tracestate to be inherited """ @@ -430,8 +427,7 @@ def test_headers_tracestate_dd_propagate_propagatedtags(self, test_agent, test_l @missing_feature(context.library == "python", reason="Issue: Does not drop dm") @missing_feature(context.library == "ruby", reason="Issue: does not escape '~' characters to '=' in _dd.p.usr.id") def test_headers_tracestate_dd_propagate_propagatedtags_change_sampling_same_dm(self, test_agent, test_library): - """ - harness sends a request with both tracestate and traceparent + """Harness sends a request with both tracestate and traceparent expects a valid traceparent from the output header with the same trace_id expects the tracestate to be inherited expects the decision maker to be passed through as DEFAULT @@ -495,8 +491,7 @@ def test_headers_tracestate_dd_propagate_propagatedtags_change_sampling_same_dm( @missing_feature(context.library == "python", reason="Issue: Does not reset dm to DEFAULT") @missing_feature(context.library == "ruby", reason="Issue: Does not reset dm to DEFAULT") def test_headers_tracestate_dd_propagate_propagatedtags_change_sampling_reset_dm(self, test_agent, test_library): - """ - harness sends a request with both tracestate and traceparent + """Harness sends a request with both tracestate and traceparent expects a valid traceparent from the output header with the same trace_id expects the tracestate to be inherited expects the decision maker to be reset to DEFAULT @@ -557,8 +552,7 @@ def test_headers_tracestate_dd_propagate_propagatedtags_change_sampling_reset_dm @temporary_enable_propagationstyle_default() @bug(library="php", reason="APMAPI-916") def test_headers_tracestate_dd_keeps_32_or_fewer_list_members(self, test_agent, test_library): - """ - harness sends requests with both tracestate and traceparent. + """Harness sends requests with both tracestate and traceparent. all items in the input tracestate are propagated because the resulting number of list-members in the tracestate is less than or equal to 32 """ @@ -629,8 +623,7 @@ def test_headers_tracestate_dd_keeps_32_or_fewer_list_members(self, test_agent, @bug(library="python", reason="APMAPI-914") @bug(library="php", reason="APMAPI-916") def test_headers_tracestate_dd_evicts_32_or_greater_list_members(self, test_agent, test_library): - """ - harness sends a request with both tracestate and traceparent. + """Harness sends a request with both tracestate and traceparent. the last list-member in the input tracestate is removed from the output tracestate string because the maximum number of list-members is 32. 
""" diff --git a/tests/parametric/test_library_tracestats.py b/tests/parametric/test_library_tracestats.py index a9e05dbc57..b56a9c7fb3 100644 --- a/tests/parametric/test_library_tracestats.py +++ b/tests/parametric/test_library_tracestats.py @@ -1,10 +1,7 @@ import base64 -import pprint from typing import Any -from typing import Optional -from typing import List -import numpy +import numpy as np import msgpack import pytest @@ -13,6 +10,7 @@ from utils.parametric.spec.trace import V06StatsAggr from utils.parametric.spec.trace import find_root_span from utils import missing_feature, context, scenarios, features +from utils.tools import logger parametrize = pytest.mark.parametrize @@ -24,7 +22,7 @@ def _human_stats(stats: V06StatsAggr) -> str: return str(filtered_copy) -def enable_tracestats(sample_rate: Optional[float] = None) -> Any: +def enable_tracestats(sample_rate: float | None = None) -> Any: env = { "DD_TRACE_STATS_COMPUTATION_ENABLED": "1", # reference, dotnet, python, golang "DD_TRACE_TRACER_METRICS_ENABLED": "true", # java @@ -46,11 +44,10 @@ class Test_Library_Tracestats: @missing_feature(context.library == "php", reason="php has not implemented stats computation yet") @missing_feature(context.library == "ruby", reason="ruby has not implemented stats computation yet") def test_metrics_msgpack_serialization_TS001(self, library_env, test_agent, test_library): - """ - When spans are finished - Each trace has stats metrics computed for it serialized properly in msgpack format with required fields - The required metrics are: - {error_count, hit_count, ok/error latency distributions, duration} + """When spans are finished + Each trace has stats metrics computed for it serialized properly in msgpack format with required fields + The required metrics are: + {error_count, hit_count, ok/error latency distributions, duration} """ with test_library: with test_library.dd_start_span(name="web.request", resource="/users", service="webserver"): @@ -69,7 +66,7 @@ def test_metrics_msgpack_serialization_TS001(self, library_env, test_agent, test agent_decoded_stats = decoded_stats_requests[0]["body"]["Stats"][0]["Stats"][0] assert len(decoded_stats_requests) == 1 assert len(decoded_stats_requests[0]["body"]["Stats"]) == 1 - pprint.pprint([_human_stats(s) for s in decoded_stats_requests[0]["body"]["Stats"][0]["Stats"]]) + logger.debug([_human_stats(s) for s in decoded_stats_requests[0]["body"]["Stats"][0]["Stats"]]) assert deserialized_stats["Name"] == "web.request" assert deserialized_stats["Resource"] == "/users" assert deserialized_stats["Service"] == "webserver" @@ -101,41 +98,40 @@ def test_metrics_msgpack_serialization_TS001(self, library_env, test_agent, test @missing_feature(context.library == "php", reason="php has not implemented stats computation yet") @missing_feature(context.library == "ruby", reason="ruby has not implemented stats computation yet") def test_distinct_aggregationkeys_TS003(self, library_env, test_agent, test_library): - """ - When spans are created with a unique set of dimensions - Each span has stats computed for it and is in its own bucket - The dimensions are: { service, type, name, resource, HTTP_status_code, synthetics } + """When spans are created with a unique set of dimensions + Each span has stats computed for it and is in its own bucket + The dimensions are: { service, type, name, resource, HTTP_status_code, synthetics } """ name = "name" resource = "resource" service = "service" - type = "http" + span_type = "http" http_status_code = "200" origin = "rum" with 
test_library: # Baseline - with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=type) as span: + with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=span_type) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val=http_status_code) # Unique Name with test_library.dd_start_span( - name="unique-name", resource=resource, service=service, typestr=type + name="unique-name", resource=resource, service=service, typestr=span_type ) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val=http_status_code) # Unique Resource with test_library.dd_start_span( - name=name, resource="unique-resource", service=service, typestr=type + name=name, resource="unique-resource", service=service, typestr=span_type ) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val=http_status_code) # Unique Service with test_library.dd_start_span( - name=name, resource=resource, service="unique-service", typestr=type + name=name, resource=resource, service="unique-service", typestr=span_type ) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val=http_status_code) @@ -148,12 +144,12 @@ def test_distinct_aggregationkeys_TS003(self, library_env, test_agent, test_libr span.set_meta(key="http.status_code", val=http_status_code) # Unique Synthetics - with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=type) as span: + with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=span_type) as span: span.set_meta(key="_dd.origin", val="synthetics") span.set_meta(key="http.status_code", val=http_status_code) # Unique HTTP Status Code - with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=type) as span: + with test_library.dd_start_span(name=name, resource=resource, service=service, typestr=span_type) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val="400") @@ -183,9 +179,8 @@ def test_distinct_aggregationkeys_TS003(self, library_env, test_agent, test_libr @missing_feature(context.library == "ruby", reason="ruby has not implemented stats computation yet") @enable_tracestats() def test_measured_spans_TS004(self, library_env, test_agent, test_library): - """ - When spans are marked as measured - Each has stats computed for it + """When spans are marked as measured + Each has stats computed for it """ with test_library: with test_library.dd_start_span(name="web.request", resource="/users", service="webserver") as span: @@ -207,7 +202,7 @@ def test_measured_spans_TS004(self, library_env, test_agent, test_library): requests = test_agent.v06_stats_requests() assert len(requests) > 0 stats = requests[0]["body"]["Stats"][0]["Stats"] - pprint.pprint([_human_stats(s) for s in stats]) + logger.debug([_human_stats(s) for s in stats]) assert len(stats) == 3 web_stats = [s for s in stats if s["Name"] == "web.request"][0] @@ -225,9 +220,8 @@ def test_measured_spans_TS004(self, library_env, test_agent, test_library): @missing_feature(context.library == "ruby", reason="ruby has not implemented stats computation yet") @enable_tracestats() def test_top_level_TS005(self, library_env, test_agent, test_library): - """ - When top level (service entry) spans are created - Each top level span has trace stats computed for it. 
+ """When top level (service entry) spans are created + Each top level span has trace stats computed for it. """ with test_library: # Create a top level span. @@ -275,9 +269,8 @@ def test_top_level_TS005(self, library_env, test_agent, test_library): @missing_feature(context.library == "ruby", reason="ruby has not implemented stats computation yet") @enable_tracestats() def test_successes_errors_recorded_separately_TS006(self, library_env, test_agent, test_library): - """ - When spans are marked as errors - The errors count is incremented appropriately and the stats are aggregated into the ErrorSummary + """When spans are marked as errors + The errors count is incremented appropriately and the stats are aggregated into the ErrorSummary """ with test_library: # Send 2 successes @@ -331,10 +324,9 @@ def test_successes_errors_recorded_separately_TS006(self, library_env, test_agen @missing_feature(context.library == "ruby", reason="ruby has not implemented stats computation yet") @enable_tracestats(sample_rate=0.0) def test_sample_rate_0_TS007(self, library_env, test_agent, test_library): - """ - When the sample rate is 0 and trace stats is enabled - non-P0 traces should be dropped - trace stats should be produced + """When the sample rate is 0 and trace stats is enabled + non-P0 traces should be dropped + trace stats should be produced """ with test_library: with test_library.dd_start_span(name="web.request", resource="/users", service="webserver"): @@ -353,8 +345,7 @@ def test_sample_rate_0_TS007(self, library_env, test_agent, test_library): @missing_feature(reason="relative error test is broken") @enable_tracestats() def test_relative_error_TS008(self, library_env, test_agent, test_library): - """ - When trace stats are computed for traces + """When trace stats are computed for traces The stats should be accurate to within 1% of the real values Note that this test uses the duration of actual spans created and so this test could be flaky. @@ -370,7 +361,7 @@ def test_relative_error_TS008(self, library_env, test_agent, test_library): traces = test_agent.traces() assert len(traces) == 10 - durations: List[int] = [] + durations: list[int] = [] for trace in traces: span = find_root_span(trace) assert span is not None @@ -385,11 +376,11 @@ def test_relative_error_TS008(self, library_env, test_agent, test_library): assert web_stats["Hits"] == 10 # Validate the sketches - np_duration = numpy.array(durations) + np_duration = np.array(durations) assert web_stats["Duration"] == sum(durations), "Stats duration should match the span duration exactly" for quantile in (0.5, 0.75, 0.95, 0.99, 1): assert web_stats["OkSummary"].get_quantile_value(quantile) == pytest.approx( - numpy.quantile(np_duration, quantile), + np.quantile(np_duration, quantile), rel=0.01, ), "Quantile mismatch for quantile %r" % quantile @@ -399,24 +390,23 @@ def test_relative_error_TS008(self, library_env, test_agent, test_library): @missing_feature(context.library == "ruby", reason="ruby has not implemented stats computation yet") @enable_tracestats() def test_metrics_computed_after_span_finsh_TS009(self, library_env, test_agent, test_library): - """ - When trace stats are computed for traces - Metrics must be computed after spans are finished, otherwise components of the aggregation key may change after - contribution to aggregates. + """When trace stats are computed for traces + Metrics must be computed after spans are finished, otherwise components of the aggregation key may change after + contribution to aggregates. 
""" name = "name" resource = "resource" service = "service" - type = "http" + span_type = "http" http_status_code = "200" origin = "synthetics" with test_library: - with test_library.dd_start_span(name=name, service=service, resource=resource, typestr=type) as span: + with test_library.dd_start_span(name=name, service=service, resource=resource, typestr=span_type) as span: span.set_meta(key="_dd.origin", val=origin) span.set_meta(key="http.status_code", val=http_status_code) - with test_library.dd_start_span(name=name, service=service, resource=resource, typestr=type) as span2: + with test_library.dd_start_span(name=name, service=service, resource=resource, typestr=span_type) as span2: span2.set_meta(key="_dd.origin", val=origin) span2.set_meta(key="http.status_code", val=http_status_code) @@ -451,10 +441,9 @@ def test_metrics_computed_after_span_finsh_TS009(self, library_env, test_agent, @missing_feature(context.library == "php", reason="php has not implemented stats computation yet") @parametrize("library_env", [{"DD_TRACE_STATS_COMPUTATION_ENABLED": "0"}]) def test_metrics_computed_after_span_finish_TS010(self, library_env, test_agent, test_library): - """ - When DD_TRACE_STATS_COMPUTATION_ENABLED=False - Metrics must be computed after spans are finished, otherwise components of the aggregation key may change after - contribution to aggregates. + """When DD_TRACE_STATS_COMPUTATION_ENABLED=False + Metrics must be computed after spans are finished, otherwise components of the aggregation key may change after + contribution to aggregates. """ with test_library: with test_library.dd_start_span(name="name", service="service", resource="resource") as span: diff --git a/tests/parametric/test_otel_api_interoperability.py b/tests/parametric/test_otel_api_interoperability.py index f936d5bf16..dc853744b5 100644 --- a/tests/parametric/test_otel_api_interoperability.py +++ b/tests/parametric/test_otel_api_interoperability.py @@ -1,8 +1,6 @@ -import json - import pytest -from utils import bug, missing_feature, irrelevant, context, scenarios, features +from utils import scenarios, features from opentelemetry.trace import SpanKind from utils.parametric.spec.trace import find_trace, find_span, retrieve_span_links, find_only_span, find_root_span @@ -26,9 +24,7 @@ @scenarios.parametric class Test_Otel_API_Interoperability: def test_span_creation_using_otel(self, test_agent, test_library): - """ - - A span created with the OTel API should be visible in the DD API - """ + """- A span created with the OTel API should be visible in the DD API""" with test_library: with test_library.otel_start_span("otel_span") as otel_span: dd_current_span = test_library.dd_current_span() @@ -37,9 +33,7 @@ def test_span_creation_using_otel(self, test_agent, test_library): assert dd_current_span.span_id == otel_span.span_id def test_span_creation_using_datadog(self, test_agent, test_library): - """ - - A span created with the DD API should be visible in the OTel API - """ + """- A span created with the DD API should be visible in the OTel API""" with test_library: with test_library.dd_start_span("dd_span") as dd_span: otel_current_span = test_library.otel_current_span() @@ -48,9 +42,7 @@ def test_span_creation_using_datadog(self, test_agent, test_library): assert otel_current_span.span_id == dd_span.span_id def test_otel_start_after_datadog_span(self, test_agent, test_library): - """ - - Start a span using the OTel API while a span created using the Datadog API already exists - """ + """- Start a span using the OTel API while 
a span created using the Datadog API already exists""" with test_library: with test_library.dd_start_span("dd_span") as dd_span: with test_library.otel_start_span( @@ -61,7 +53,7 @@ def test_otel_start_after_datadog_span(self, test_agent, test_library): # FIXME: The trace_id is encoded in hex while span_id is an int. Make this API consistent assert current_dd_span.trace_id == otel_context.get("trace_id") - assert "{:016x}".format(int(current_dd_span.span_id)) == otel_context.get("span_id") + assert f"{int(current_dd_span.span_id):016x}" == otel_context.get("span_id") dd_span.finish() traces = test_agent.wait_for_num_traces(1, sort_by_start=False) @@ -76,9 +68,7 @@ def test_otel_start_after_datadog_span(self, test_agent, test_library): assert span.get("parent_id") == root.get("span_id") def test_has_ended(self, test_agent, test_library): - """ - - Test that the ending status of a span is propagated across APIs - """ + """- Test that the ending status of a span is propagated across APIs""" with test_library: with test_library.dd_start_span("dd_span") as dd_span: dd_current_span = test_library.otel_current_span() @@ -97,9 +87,7 @@ def test_has_ended(self, test_agent, test_library): assert len(trace) == 1 def test_datadog_start_after_otel_span(self, test_agent, test_library): - """ - - Start a span using the Datadog API while a span created using the OTel API already exists - """ + """- Start a span using the Datadog API while a span created using the OTel API already exists""" with test_library: with test_library.otel_start_span(name="otel_span", span_kind=SpanKind.INTERNAL) as otel_span: with test_library.dd_start_span(name="dd_span", parent_id=otel_span.span_id) as dd_span: @@ -125,9 +113,7 @@ def test_datadog_start_after_otel_span(self, test_agent, test_library): assert span.get("parent_id") == root.get("span_id") def test_set_update_remove_meta(self, test_agent, test_library): - """ - - Test that meta is set/updated/removed across APIs - """ + """- Test that meta is set/updated/removed across APIs""" with test_library: with test_library.dd_start_span("dd_span") as dd_span: dd_span.set_meta("arg1", "val1") @@ -166,9 +152,7 @@ def test_set_update_remove_meta(self, test_agent, test_library): assert "arg5" not in meta def test_set_update_remove_metric(self, test_agent, test_library): - """ - - Test that metrics are set/updated/removed across APIs - """ + """- Test that metrics are set/updated/removed across APIs""" with test_library: with test_library.dd_start_span("dd_span") as dd_span: dd_span.set_metric("m1", 1) # Set a metric with the DD API @@ -207,9 +191,7 @@ def test_set_update_remove_metric(self, test_agent, test_library): assert "m5" not in metrics def test_update_resource(self, test_agent, test_library): - """ - - Test that the resource is updated across APIs - """ + """- Test that the resource is updated across APIs""" with test_library: with test_library.otel_start_span("my_resource") as otel_span: dd_span = test_library.dd_current_span() @@ -223,9 +205,7 @@ def test_update_resource(self, test_agent, test_library): assert span.get("resource") == "my_new_resource" def test_span_links_add(self, test_agent, test_library): - """ - - Test that links can be added with the Datadog API on a span created with the OTel API - """ + """- Test that links can be added with the Datadog API on a span created with the OTel API""" with test_library: with test_library.dd_start_span("dd_root") as dd_span: pass @@ -243,9 +223,7 @@ def test_span_links_add(self, test_agent, test_library): assert 
len(span_links) == 1 def test_concurrent_traces_in_order(self, test_agent, test_library): - """ - - Basic concurrent traces and spans - """ + """- Basic concurrent traces and spans""" with test_library: with test_library.otel_start_span("otel_root", span_kind=SpanKind.SERVER) as otel_root: with test_library.dd_start_span(name="dd_child", parent_id=otel_root.span_id) as dd_child: @@ -281,9 +259,7 @@ def test_concurrent_traces_in_order(self, test_agent, test_library): assert root1["trace_id"] != root2["trace_id"] def test_concurrent_traces_nested_otel_root(self, test_agent, test_library): - """ - - Concurrent traces with nested start/end, with the first trace being opened with the OTel API - """ + """- Concurrent traces with nested start/end, with the first trace being opened with the OTel API""" with test_library: with test_library.otel_start_span(name="otel_root", span_kind=SpanKind.SERVER) as otel_root: with test_library.dd_start_span(name="dd_root", parent_id=0) as dd_root: @@ -329,9 +305,7 @@ def test_concurrent_traces_nested_otel_root(self, test_agent, test_library): assert root1["trace_id"] != root2["trace_id"] def test_concurrent_traces_nested_dd_root(self, test_agent, test_library): - """ - - Concurrent traces with nested start/end, with the first trace being opened with the Datadog API - """ + """- Concurrent traces with nested start/end, with the first trace being opened with the Datadog API""" with test_library: with test_library.dd_start_span(name="dd_root", parent_id=0) as dd_root: with test_library.otel_start_span(name="otel_root", span_kind=SpanKind.SERVER) as otel_root: @@ -377,9 +351,7 @@ def test_concurrent_traces_nested_dd_root(self, test_agent, test_library): assert root1["trace_id"] != root2["trace_id"] def test_distributed_headers_are_propagated_tracecontext(self, test_agent, test_library): - """ - - Test that distributed tracecontext headers are propagated across APIs - """ + """- Test that distributed tracecontext headers are propagated across APIs""" trace_id = "0000000000000000000000000000002a" # 42 parent_id = "0000000000000003" # 3 headers = [ @@ -404,9 +376,7 @@ def test_distributed_headers_are_propagated_tracecontext(self, test_agent, test_ assert root["metrics"]["_sampling_priority_v1"] == 1 def test_distributed_headers_are_propagated_datadog(self, test_agent, test_library): - """ - - Test that distributed datadog headers are propagated across APIs - """ + """- Test that distributed datadog headers are propagated across APIs""" headers = [ ("x-datadog-trace-id", "123456789"), @@ -437,9 +407,7 @@ def test_distributed_headers_are_propagated_datadog(self, test_agent, test_libra assert root["metrics"]["_sampling_priority_v1"] == -2 def test_set_attribute_from_otel(self, test_agent, test_library): - """ - - Test that attributes can be set on a Datadog span using the OTel API - """ + """- Test that attributes can be set on a Datadog span using the OTel API""" with test_library: with test_library.dd_start_span("dd_span") as dd_span: otel_span = test_library.otel_current_span() @@ -477,9 +445,7 @@ def test_set_attribute_from_otel(self, test_agent, test_library): assert root["metrics"]["int_array.2"] == 3 def test_set_attribute_from_datadog(self, test_agent, test_library): - """ - - Test that attributes can be set on an OTel span using the Datadog API - """ + """- Test that attributes can be set on an OTel span using the Datadog API""" with test_library: with test_library.otel_start_span(name="otel_span") as otel_span: dd_span = test_library.dd_current_span() diff 
--git a/tests/parametric/test_otel_env_vars.py b/tests/parametric/test_otel_env_vars.py index e0a30671b2..d631939278 100644 --- a/tests/parametric/test_otel_env_vars.py +++ b/tests/parametric/test_otel_env_vars.py @@ -1,4 +1,4 @@ -import pytest, os +import pytest from utils import missing_feature, context, scenarios, features, irrelevant @@ -16,6 +16,7 @@ class Test_Otel_Env_Vars: "DD_TRACE_DEBUG": "false", "OTEL_LOG_LEVEL": "debug", "DD_TRACE_SAMPLE_RATE": "0.5", + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.5}]', "OTEL_TRACES_SAMPLER": "traceidratio", "OTEL_TRACES_SAMPLER_ARG": "0.1", "DD_TRACE_ENABLED": "true", diff --git a/tests/parametric/test_otel_span_methods.py b/tests/parametric/test_otel_span_methods.py index 1c4435f3a3..6a3d727039 100644 --- a/tests/parametric/test_otel_span_methods.py +++ b/tests/parametric/test_otel_span_methods.py @@ -3,7 +3,6 @@ import json import pytest -from typing import Union from utils.parametric._library_client import Link from opentelemetry.trace import StatusCode from opentelemetry.trace import SpanKind @@ -11,8 +10,7 @@ from utils.parametric.spec.trace import find_trace from utils.parametric.spec.trace import retrieve_span_links from utils.parametric.spec.trace import find_first_span_in_trace_payload -from utils.parametric.spec.tracecontext import TRACECONTEXT_FLAGS_SET -from utils import bug, features, missing_feature, irrelevant, flaky, context, scenarios +from utils import bug, features, missing_feature, irrelevant, context, scenarios # this global mark applies to all tests in this file. # DD_TRACE_OTEL_ENABLED=true is required in some tracers (.NET, Python?) @@ -30,9 +28,7 @@ class Test_Otel_Span_Methods: @missing_feature(context.library <= "dotnet@2.41.0", reason="Implemented in 2.42.0") @missing_feature(context.library == "python", reason="New operation name mapping not yet implemented") def test_otel_start_span(self, test_agent, test_library): - """ - - Start/end a span with start and end options - """ + """- Start/end a span with start and end options""" with test_library: duration: int = 6789 @@ -58,9 +54,7 @@ def test_otel_start_span(self, test_agent, test_library): @missing_feature(context.library <= "dotnet@2.41.0", reason="Implemented in 2.42.0") @missing_feature(context.library == "python", reason="New operation name mapping not yet implemented") def test_otel_set_service_name(self, test_agent, test_library): - """ - - Update the service name on a span - """ + """- Update the service name on a span""" with test_library: with test_library.otel_start_span("parent_span", span_kind=SpanKind.INTERNAL) as parent: parent.set_attributes({"service.name": "new_service"}) @@ -81,10 +75,9 @@ def test_otel_set_service_name(self, test_agent, test_library): @missing_feature(context.library < "java@1.35.0", reason="Implemented in 1.35.0") @missing_feature(context.library < "dotnet@2.53.0", reason="Implemented in 2.53.0") def test_otel_set_attribute_remapping_httpresponsestatuscode(self, test_agent, test_library): - """ - - May 2024 update to OTel API RFC requires implementations to remap - OTEL Span attribute 'http.response.status_code' to DD Span tag 'http.status_code'. - This solves an issue with trace metrics when using the OTel API. + """- May 2024 update to OTel API RFC requires implementations to remap + OTEL Span attribute 'http.response.status_code' to DD Span tag 'http.status_code'. + This solves an issue with trace metrics when using the OTel API. 
""" with test_library: with test_library.otel_start_span("operation") as span: @@ -106,11 +99,10 @@ def test_otel_set_attribute_remapping_httpresponsestatuscode(self, test_agent, t @irrelevant(context.library == "golang", reason="Does not support automatic status code remapping to meta") @irrelevant(context.library == "dotnet", reason="Does not support automatic status code remapping to meta") def test_otel_set_attribute_remapping_httpstatuscode(self, test_agent, test_library): - """ - - May 2024 update to OTel API RFC requires implementations to remap - OTEL Span attribute 'http.response.status_code' to DD Span tag 'http.status_code'. - This test ensures that the original OTEL Span attribute 'http.status_code' - is also set as DD Span tag 'http.status_code' + """- May 2024 update to OTel API RFC requires implementations to remap + OTEL Span attribute 'http.response.status_code' to DD Span tag 'http.status_code'. + This test ensures that the original OTEL Span attribute 'http.status_code' + is also set as DD Span tag 'http.status_code' """ with test_library: with test_library.otel_start_span("operation") as span: @@ -133,8 +125,7 @@ def test_otel_set_attribute_remapping_httpstatuscode(self, test_agent, test_libr @missing_feature(context.library == "nodejs", reason="New operation name mapping not yet implemented") @missing_feature(context.library == "python", reason="New operation name mapping not yet implemented") def test_otel_set_attributes_different_types_legacy(self, test_agent, test_library): - """ - - Set attributes of multiple types for an otel span + """- Set attributes of multiple types for an otel span This tests legacy behavior. The new behavior is tested in test_otel_set_attributes_different_types_with_array_encoding """ @@ -216,9 +207,7 @@ def test_otel_set_attributes_different_types_legacy(self, test_agent, test_libra context.library == "python", reason="New operation name mapping & array encoding not yet implemented" ) def test_otel_set_attributes_different_types_with_array_encoding(self, test_agent, test_library): - """ - - Set attributes of multiple types for an otel span - """ + """- Set attributes of multiple types for an otel span""" start_time = int(time.time()) with test_library: with test_library.otel_start_span("operation", span_kind=SpanKind.PRODUCER, timestamp=start_time) as span: @@ -272,10 +261,9 @@ def test_otel_set_attributes_different_types_with_array_encoding(self, test_agen reason=".NET's native implementation does not change IsAllDataRequested to false after ending a span. OpenTelemetry follows this as well for IsRecording.", ) def test_otel_span_is_recording(self, test_agent, test_library): - """ - Test functionality of ending a span. - - before ending - span.is_recording() is true - - after ending - span.is_recording() is false + """Test functionality of ending a span. + - before ending - span.is_recording() is true + - after ending - span.is_recording() is false """ with test_library: # start parent @@ -291,8 +279,7 @@ def test_otel_span_is_recording(self, test_agent, test_library): ) @missing_feature(context.library == "python", reason="New operation name mapping not yet implemented") def test_otel_span_finished_end_options(self, test_agent, test_library): - """ - Test functionality of ending a span with end options. + """Test functionality of ending a span with end options. 
After finishing the span, finishing the span with different end options has no effect """ start_time: int = 12345 @@ -319,11 +306,10 @@ def test_otel_span_finished_end_options(self, test_agent, test_library): @missing_feature(context.library <= "dotnet@2.41.0", reason="Implemented in 2.42.0") @missing_feature(context.library == "python", reason="New operation name mapping not yet implemented") def test_otel_span_end(self, test_agent, test_library): - """ - Test functionality of ending a span. After ending: - - operations on that span become noop - - child spans are still running and can be ended later - - still possible to start child spans from parent context + """Test functionality of ending a span. After ending: + - operations on that span become noop + - child spans are still running and can be ended later + - still possible to start child spans from parent context """ with test_library: with test_library.otel_start_span(name="parent", span_kind=SpanKind.PRODUCER, end_on_exit=False) as parent: @@ -358,8 +344,7 @@ def test_otel_span_end(self, test_agent, test_library): ) @missing_feature(context.library == "python", reason="New operation name mapping not yet implemented") def test_otel_set_span_status_error(self, test_agent, test_library): - """ - This test verifies that setting the status of a span + """This test verifies that setting the status of a span behaves accordingly to the Otel API spec (https://opentelemetry.io/docs/reference/specification/trace/api/#set-status) By checking the following: @@ -386,8 +371,7 @@ def test_otel_set_span_status_error(self, test_agent, test_library): reason="Default state of otel spans is OK, updating the status from OK to ERROR is supported", ) def test_otel_set_span_status_ok(self, test_agent, test_library): - """ - This test verifies that setting the status of a span + """This test verifies that setting the status of a span behaves accordingly to the Otel API spec (https://opentelemetry.io/docs/reference/specification/trace/api/#set-status) By checking the following: @@ -409,8 +393,7 @@ def test_otel_set_span_status_ok(self, test_agent, test_library): @bug(context.library < "ruby@2.2.0", reason="APMRP-360") def test_otel_get_span_context(self, test_agent, test_library): - """ - This test verifies retrieving the span context of a span + """This test verifies retrieving the span context of a span accordingly to the Otel API spec (https://opentelemetry.io/docs/reference/specification/trace/api/#get-context) """ @@ -431,7 +414,7 @@ def test_otel_get_span_context(self, test_agent, test_library): else: # Some languages e.g. Node.js using express need to return as a string value # due to 64-bit integers being too large. 
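[Editor's note: the int()-then-format dance in the assertions around here exists because the parametric apps disagree on span id types (native ints in most languages, decimal strings in Node.js). A tiny illustrative normalizer, not part of the test suite, shows the canonical form everything converges on.]

def normalize_span_id(value: int | str) -> str:
    # Accept either representation and emit the zero-padded 16-hex form
    # used by the W3C parent-id field and the span context assertions.
    return f"{int(value):016x}"

assert normalize_span_id(3) == "0000000000000003"
assert normalize_span_id("255") == "00000000000000ff"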
- assert context.get("span_id") == "{:016x}".format(int(span.span_id)) + assert context.get("span_id") == f"{int(span.span_id):016x}" assert context.get("trace_flags") == "01" # compare the values of the span context with the values of the trace sent to the agent @@ -449,8 +432,7 @@ def test_otel_get_span_context(self, test_agent, test_library): @missing_feature(context.library <= "dotnet@2.41.0", reason="Implemented in 2.42.0") @missing_feature(context.library == "python", reason="Not implemented") def test_otel_set_attributes_separately(self, test_agent, test_library): - """ - This test verifies that setting attributes separately + """This test verifies that setting attributes separately behaves accordingly to the naming conventions """ with test_library: @@ -652,9 +634,7 @@ def test_otel_span_operation_name( @missing_feature(context.library <= "dotnet@2.41.0", reason="Implemented in 2.42.0") @missing_feature(context.library == "python", reason="Not implemented") def test_otel_span_reserved_attributes_overrides(self, test_agent, test_library): - """ - Tests that the reserved attributes will override expected values - """ + """Tests that the reserved attributes will override expected values""" with test_library: with test_library.otel_start_span("otel_span_name", span_kind=SpanKind.SERVER) as span: span.set_attributes({"http.request.method": "GET"}) @@ -691,11 +671,9 @@ def test_otel_span_reserved_attributes_overrides(self, test_agent, test_library) [("true", 1), ("TRUE", 1), ("True", 1), ("false", 0), ("False", 0), ("FALSE", 0), (True, 1), (False, 0)], ) def test_otel_span_basic_reserved_attributes_overrides_analytics_event( - self, analytics_event_value: Union[bool, str], expected_metric_value: Union[int, None], test_agent, test_library + self, analytics_event_value: bool | str, expected_metric_value: int | None, test_agent, test_library ): - """ - Tests the analytics.event reserved attribute override with basic inputs - """ + """Tests the analytics.event reserved attribute override with basic inputs""" run_otel_span_reserved_attributes_overrides_analytics_event( analytics_event_value=analytics_event_value, expected_metric_value=expected_metric_value, @@ -723,10 +701,9 @@ def test_otel_span_basic_reserved_attributes_overrides_analytics_event( "analytics_event_value,expected_metric_value", [("something-else", None), ("fAlse", None), ("trUe", None)] ) def test_otel_span_strict_reserved_attributes_overrides_analytics_event( - self, analytics_event_value: Union[bool, str], expected_metric_value: Union[int, None], test_agent, test_library + self, analytics_event_value: bool | str, expected_metric_value: int | None, test_agent, test_library ): - """ - Tests that the analytics.event reserved attribute override doesn't set the _dd1.sr.eausr metric + """Tests that the analytics.event reserved attribute override doesn't set the _dd1.sr.eausr metric for inputs that aren't accepted by strconv.ParseBool """ run_otel_span_reserved_attributes_overrides_analytics_event( @@ -746,11 +723,9 @@ def test_otel_span_strict_reserved_attributes_overrides_analytics_event( "analytics_event_value,expected_metric_value", [("t", 1), ("T", 1), ("f", 0), ("F", 0), ("1", 1), ("0", 0)] ) def test_otel_span_extended_reserved_attributes_overrides_analytics_event( - self, analytics_event_value: Union[bool, str], expected_metric_value: Union[int, None], test_agent, test_library + self, analytics_event_value: bool | str, expected_metric_value: int | None, test_agent, test_library ): - """ - Tests that the analytics.event 
reserved attribute override accepts Go's strconv.ParseBool additional values - """ + """Tests that the analytics.event reserved attribute override accepts Go's strconv.ParseBool additional values""" run_otel_span_reserved_attributes_overrides_analytics_event( analytics_event_value=analytics_event_value, expected_metric_value=expected_metric_value, @@ -767,9 +742,7 @@ def test_otel_span_extended_reserved_attributes_overrides_analytics_event( @missing_feature(context.library < "nodejs@5.17.0", reason="Implemented in v5.17.0 & v4.41.0") @missing_feature(context.library < "python@2.9.0", reason="Not implemented") def test_otel_add_event_meta_serialization(self, test_agent, test_library): - """ - Tests the Span.AddEvent API and its serialization into the meta tag 'events' - """ + """Tests the Span.AddEvent API and its serialization into the meta tag 'events'""" # Since timestamps may not be standardized across languages, use microseconds as the input # and nanoseconds as the output (this is the format expected in the OTLP trace protocol) event2_timestamp_microseconds = int(time.time_ns() / 1000) @@ -820,8 +793,7 @@ def test_otel_add_event_meta_serialization(self, test_agent, test_library): @missing_feature(context.library < "nodejs@5.17.0", reason="Implemented in v5.17.0 & v4.41.0") @missing_feature(context.library < "python@2.9.0", reason="Not implemented") def test_otel_record_exception_does_not_set_error(self, test_agent, test_library): - """ - Tests the Span.RecordException API (requires Span.AddEvent API support) + """Tests the Span.RecordException API (requires Span.AddEvent API support) and its serialization into the Datadog error tags and the 'events' tag """ with test_library: @@ -840,8 +812,7 @@ def test_otel_record_exception_does_not_set_error(self, test_agent, test_library @missing_feature(context.library < "nodejs@5.17.0", reason="Implemented in v5.17.0 & v4.41.0") @missing_feature(context.library < "python@2.9.0", reason="Not implemented") def test_otel_record_exception_meta_serialization(self, test_agent, test_library): - """ - Tests the Span.RecordException API (requires Span.AddEvent API support) + """Tests the Span.RecordException API (requires Span.AddEvent API support) and its serialization into the Datadog error tags and the 'events' tag """ with test_library: @@ -888,8 +859,7 @@ def test_otel_record_exception_meta_serialization(self, test_agent, test_library @missing_feature(context.library == "nodejs", reason="Otel Node.js API does not support attributes") @missing_feature(context.library < "python@2.9.0", reason="Not implemented") def test_otel_record_exception_attributes_serialization(self, test_agent, test_library): - """ - Tests the Span.RecordException API (requires Span.AddEvent API support) + """Tests the Span.RecordException API (requires Span.AddEvent API support) and its serialization into the Datadog error tags and the 'events' tag """ with test_library: @@ -935,8 +905,7 @@ def test_otel_record_exception_attributes_serialization(self, test_agent, test_l @missing_feature(context.library < "nodejs@5.17.0", reason="Implemented in v5.17.0 & v4.41.0") @missing_feature(context.library < "python@2.9.0", reason="Not implemented") def test_otel_record_exception_sets_all_error_tracking_tags(self, test_agent, test_library): - """ - Tests the Span.RecordException API (requires Span.AddEvent API support) + """Tests the Span.RecordException API (requires Span.AddEvent API support) and its serialization into the Datadog error tags and the 'events' tag """ with test_library: 
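[Editor's note: the three analytics.event parametrizations above (basic, strict, extended) mirror Go's strconv.ParseBool acceptance set. A sketch of the mapping from attribute value to the _dd1.sr.eausr metric, with None meaning the metric is not set; illustrative only.]

def analytics_event_metric(value: bool | str) -> int | None:
    # strconv.ParseBool accepts 1/t/T/TRUE/true/True and 0/f/F/FALSE/false/False;
    # anything else ("something-else", "fAlse", "trUe") sets no metric.
    if isinstance(value, bool):
        return int(value)
    if value in ("1", "t", "T", "TRUE", "true", "True"):
        return 1
    if value in ("0", "f", "F", "FALSE", "false", "False"):
        return 0
    return None

assert analytics_event_metric("T") == 1
assert analytics_event_metric(False) == 0
assert analytics_event_metric("fAlse") is None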
@@ -971,7 +940,7 @@ def run_operation_name_test(expected_operation_name: str, span_kind: int, attrib def run_otel_span_reserved_attributes_overrides_analytics_event( - analytics_event_value: Union[bool, str], expected_metric_value: Union[int, None], test_agent, test_library + analytics_event_value: bool | str, expected_metric_value: int | None, test_agent, test_library ): with test_library: with test_library.otel_start_span("operation", span_kind=SpanKind.SERVER) as span: diff --git a/tests/parametric/test_otel_span_with_baggage.py b/tests/parametric/test_otel_span_with_baggage.py index 0543ef4f61..2fe051f8ce 100644 --- a/tests/parametric/test_otel_span_with_baggage.py +++ b/tests/parametric/test_otel_span_with_baggage.py @@ -1,10 +1,6 @@ -import time - import pytest -from utils.parametric.spec.trace import SAMPLING_PRIORITY_KEY, ORIGIN -from utils.parametric.spec.trace import find_only_span -from utils import missing_feature, irrelevant, context, scenarios, features +from utils import scenarios, features # this global mark applies to all tests in this file. # DD_TRACE_OTEL_ENABLED=true is required in some tracers (.NET, Python?) diff --git a/tests/parametric/test_otel_tracer.py b/tests/parametric/test_otel_tracer.py index e35e58fb9e..165b0637b6 100644 --- a/tests/parametric/test_otel_tracer.py +++ b/tests/parametric/test_otel_tracer.py @@ -18,9 +18,7 @@ class Test_Otel_Tracer: @irrelevant(context.library == "cpp", reason="library does not implement OpenTelemetry") def test_otel_simple_trace(self, test_agent, test_library): - """ - Perform two traces - """ + """Perform two traces""" with test_library: with test_library.otel_start_span("root_one") as parent1: parent1.set_attributes({"parent_k1": "parent_v1"}) @@ -55,9 +53,7 @@ def test_otel_simple_trace(self, test_agent, test_library): @missing_feature(context.library <= "java@1.23.0", reason="OTel resource naming implemented in 1.24.0") @missing_feature(context.library == "nodejs", reason="Not implemented") def test_otel_force_flush(self, test_agent, test_library): - """ - Verify that force flush flushed the spans - """ + """Verify that force flush flushed the spans""" with test_library: with test_library.otel_start_span(name="test_span") as span: pass diff --git a/tests/parametric/test_parametric_endpoints.py b/tests/parametric/test_parametric_endpoints.py index 7da7742304..d779e9e1fc 100644 --- a/tests/parametric/test_parametric_endpoints.py +++ b/tests/parametric/test_parametric_endpoints.py @@ -1,5 +1,4 @@ -""" -This module provides simple unit tests for each parametric endpoint. +"""This module provides simple unit tests for each parametric endpoint. The results of these unit tests are reported to the feature parity dashboard. Parametric endpoints that are not tested in this file are not yet supported. Avoid using those endpoints in the parametric tests. @@ -26,8 +25,7 @@ @features.parametric_endpoint_parity class Test_Parametric_DDSpan_Start: def test_start_span(self, test_agent, test_library): - """ - Validates that /trace/span/start creates a new span. + """Validates that /trace/span/start creates a new span. Supported Parameters: - name: str @@ -73,8 +71,7 @@ def test_start_span(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDSpan_Finish: def test_span_finish(self, test_agent, test_library): - """ - Validates that /trace/span/finish finishes a span and sends it to the agent. + """Validates that /trace/span/finish finishes a span and sends it to the agent. 
Supported Parameters: - span_id: Union[int, str] @@ -96,8 +93,7 @@ def test_span_finish(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_Inject_Headers: def test_inject_headers(self, test_agent, test_library): - """ - Validates that /trace/span/inject_headers generates distributed tracing headers from span data. + """Validates that /trace/span/inject_headers generates distributed tracing headers from span data. Supported Parameters: - span_id: Union[int, str] @@ -115,8 +111,7 @@ def test_inject_headers(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDTrace_Extract_Headers: def test_extract_headers(self, test_agent, test_library): - """ - Validates that /trace/span/extract_headers extracts span data from distributed tracing headers. + """Validates that /trace/span/extract_headers extracts span data from distributed tracing headers. Supported Parameters: - List[Tuple[str, str]] @@ -142,8 +137,7 @@ def test_extract_headers(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDSpan_Set_Meta: def test_set_meta(self, test_agent, test_library): - """ - Validates that /trace/span/set_meta sets a key value pair on a span. + """Validates that /trace/span/set_meta sets a key value pair on a span. Supported Parameters: - span_id: Union[int, str] @@ -165,8 +159,7 @@ def test_set_meta(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDSpan_Set_Metric: def test_set_metric(self, test_agent, test_library): - """ - Validates that /trace/span/set_metric sets a metric on a span. + """Validates that /trace/span/set_metric sets a metric on a span. Supported Parameters: - span_id: Union[int, str] @@ -188,8 +181,7 @@ def test_set_metric(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDSpan_Set_Error: def test_set_error(self, test_agent, test_library): - """ - Validates that /trace/span/error sets an error on a span. + """Validates that /trace/span/error sets an error on a span. Supported Parameters: - span_id: Union[int, str] @@ -215,8 +207,7 @@ def test_set_error(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDSpan_Set_Resource: def test_set_resource(self, test_agent, test_library): - """ - Validates that /trace/span/set_resource sets a resource name on a span. + """Validates that /trace/span/set_resource sets a resource name on a span. Supported Parameters: - span_id: Union[int, str] @@ -236,8 +227,7 @@ def test_set_resource(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDSpan_Add_Link: def test_add_link(self, test_agent, test_library): - """ - Validates that /trace/span/add_link adds a spanlink to a span. + """Validates that /trace/span/add_link adds a spanlink to a span. Supported Parameters: - span_id: Union[int, str] @@ -265,8 +255,7 @@ def test_add_link(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDTrace_Config: def test_config(self, test_agent, test_library): - """ - Validates that /trace/config returns a list of tracer configurations. This list is expected to + """Validates that /trace/config returns a list of tracer configurations. This list is expected to grow over time. 
Supported Parameters: @@ -300,8 +289,7 @@ def test_config(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDTrace_Crash: def test_crash(self, test_agent, test_library): - """ - Validates that /trace/crash crashes the current process. + """Validates that /trace/crash crashes the current process. Supported Parameters: Supported Return Values: @@ -315,8 +303,7 @@ def test_crash(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDTrace_Current_Span: def test_current_span(self, test_agent, test_library): - """ - Validates that /trace/span/current returns the active Datadog span. + """Validates that /trace/span/current returns the active Datadog span. Supported Parameters: Supported Return Values: @@ -343,8 +330,7 @@ def test_current_span(self, test_agent, test_library): assert int(dd_current_span.trace_id) == 0 def test_current_span_from_otel(self, test_agent, test_library): - """ - Validates that /trace/span/current can return the Datadog span that was created by the OTEL API. + """Validates that /trace/span/current can return the Datadog span that was created by the OTEL API. Supported Parameters: Supported Return Values: @@ -363,8 +349,7 @@ def test_current_span_from_otel(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_DDTrace_Flush: def test_flush(self, test_agent, test_library): - """ - Validates that /trace/span/flush and /trace/stats/flush endpoints are implemented and return successful status codes. + """Validates that /trace/span/flush and /trace/stats/flush endpoints are implemented and return successful status codes. If these endpoint are not implemented, spans and/or stats will not be flushed when the test_library contextmanager exits. Trace data may or may not be received by the agent in time for validation. This can introduce flakiness in tests. @@ -382,8 +367,7 @@ def test_flush(self, test_agent, test_library): @pytest.mark.parametrize("library_env", [{"DD_TRACE_PROPAGATION_HTTP_BAGGAGE_ENABLED": "true"}]) class Test_Parametric_DDTrace_Baggage: def test_set_baggage(self, test_agent, test_library): - """ - Validates that /trace/span/set_baggage sets a baggage item. + """Validates that /trace/span/set_baggage sets a baggage item. Supported Parameters: - span_id: Union[int, str] @@ -399,8 +383,7 @@ def test_set_baggage(self, test_agent, test_library): assert any("baggage" in header for header in headers) def test_get_baggage(self, test_agent, test_library): - """ - Validates that /trace/span/get_baggage gets a baggage item. + """Validates that /trace/span/get_baggage gets a baggage item. Supported Parameters: - span_id: Union[int, str] @@ -416,8 +399,7 @@ def test_get_baggage(self, test_agent, test_library): assert baggage == "value" def test_get_all_baggage(self, test_agent, test_library): - """ - Validates that /trace/span/get_all_baggage gets all baggage items. + """Validates that /trace/span/get_all_baggage gets all baggage items. Supported Parameters: - span_id: Union[int, str] @@ -434,8 +416,7 @@ def test_get_all_baggage(self, test_agent, test_library): assert baggage["key2"] == "value" def test_remove_baggage(self, test_agent, test_library): - """ - Validates that /trace/span/remove_baggage removes a baggage item. + """Validates that /trace/span/remove_baggage removes a baggage item. 
Supported Parameters: - span_id: Union[int, str] @@ -455,8 +436,7 @@ def test_remove_baggage(self, test_agent, test_library): assert not any("baggage" in header for header in headers) def test_remove_all_baggage(self, test_agent, test_library): - """ - Validates that /trace/span/remove_all_baggage removes all baggage items from a span. + """Validates that /trace/span/remove_all_baggage removes all baggage items from a span. Supported Parameters: - span_id: Union[int, str] @@ -480,8 +460,7 @@ def test_remove_all_baggage(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_OtelSpan_Start: def test_span_start(self, test_agent, test_library): - """ - Validates that the /trace/otel/start_span creates a new span. + """Validates that the /trace/otel/start_span creates a new span. Supported Parameters: - name: str @@ -531,8 +510,7 @@ def test_span_start(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_OtelSpan_End: def test_span_end(self, test_agent, test_library): - """ - Validates that the /trace/otel/end_span finishes a span and sends it to the agent + """Validates that the /trace/otel/end_span finishes a span and sends it to the agent Supported Parameters: - timestamp (μs): Optional[int] @@ -541,7 +519,7 @@ def test_span_end(self, test_agent, test_library): sleep = 0.2 t1 = time.time() with test_library: - with test_library.otel_start_span("otel_end_span", end_on_exit=True) as s1: + with test_library.otel_start_span("otel_end_span", end_on_exit=True): time.sleep(sleep) total_time = time.time() - t1 @@ -550,8 +528,7 @@ def test_span_end(self, test_agent, test_library): assert sleep <= span["duration"] / 1e9 <= total_time, span["start"] def test_span_end_with_timestamp(self, test_agent, test_library): - """ - Validates that the /trace/otel/end_span finishes a span and sends it to the agent with the expected duration + """Validates that the /trace/otel/end_span finishes a span and sends it to the agent with the expected duration Supported Parameters: - timestamp (μs): Optional[int] @@ -574,8 +551,7 @@ def test_span_end_with_timestamp(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_OtelSpan_Set_Attribute: def test_otel_set_attribute(self, test_agent, test_library): - """ - Validates that /trace/otel/set_attributes sets a key value pair on a span. + """Validates that /trace/otel/set_attributes sets a key value pair on a span. Supported Parameters: - span_id: Union[int, str] @@ -595,8 +571,7 @@ def test_otel_set_attribute(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_OtelSpan_Set_Status: def test_otel_set_status(self, test_agent, test_library): - """ - Validates that /trace/otel/set_status sets a status on a span. + """Validates that /trace/otel/set_status sets a status on a span. Supported Parameters: - span_id: Union[int, str] @@ -617,8 +592,7 @@ def test_otel_set_status(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_OtelSpan_Set_Name: def test_otelspan_set_name(self, test_agent, test_library): - """ - Validates that /trace/otel/set_name sets the resource name on a span. + """Validates that /trace/otel/set_name sets the resource name on a span. 
Supported Parameters: - span_id: Union[int, str] @@ -638,8 +612,7 @@ def test_otelspan_set_name(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_OtelSpan_Events: def test_add_event(self, test_agent, test_library): - """ - Validates that /trace/otel/add_event adds an event to a span. + """Validates that /trace/otel/add_event adds an event to a span. Supported Parameters: - span_id: Union[int, str] @@ -664,8 +637,7 @@ @irrelevant(context.library == "golang", reason="OTEL does not expose an API for recording exceptions") @bug(library="nodejs", reason="APMAPI-778") # does not set attributes on the exception event def test_record_exception(self, test_agent, test_library): - """ - Validates that /trace/otel/record_exception adds an exception event to a span. + """Validates that /trace/otel/record_exception adds an exception event to a span. Supported Parameters: - span_id: Union[int, str] @@ -690,8 +662,7 @@ @features.parametric_endpoint_parity class Test_Parametric_OtelSpan_Is_Recording: def test_is_recording(self, test_agent, test_library): - """ - Validates that /trace/otel/is_recording returns whether a span is recording. + """Validates that /trace/otel/is_recording returns whether a span is recording. Supported Parameters: - span_id: Union[int, str] @@ -706,8 +677,7 @@ @features.parametric_endpoint_parity class Test_Parametric_Otel_Baggage: def test_set_baggage(self, test_agent, test_library): - """ - Validates that /trace/otel/otel_set_baggage sets a baggage item. + """Validates that /trace/otel/otel_set_baggage sets a baggage item. Supported Parameters: - span_id: Union[int, str] @@ -724,8 +694,7 @@ def test_set_baggage(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_Otel_Current_Span: def test_otel_current_span(self, test_agent, test_library): - """ - Validates that /trace/otel/current_span returns the current span. + """Validates that /trace/otel/current_span returns the current span. Supported Parameters: Supported Return Values: @@ -756,15 +725,14 @@ def test_otel_current_span(self, test_agent, test_library): @features.parametric_endpoint_parity class Test_Parametric_Otel_Trace_Flush: def test_flush(self, test_agent, test_library): - """ - Validates that /trace/otel/flush flushes all finished spans. + """Validates that /trace/otel/flush flushes all finished spans.
Supported Parameters: - timeout_sec: int Supported Return Values: - success: boolean """ - with test_library.otel_start_span("test_otel_flush") as s1: + with test_library.otel_start_span("test_otel_flush"): pass assert test_library.otel_flush(timeout_sec=5) diff --git a/tests/parametric/test_partial_flushing.py b/tests/parametric/test_partial_flushing.py index 03db32448c..1b8161e3a7 100644 --- a/tests/parametric/test_partial_flushing.py +++ b/tests/parametric/test_partial_flushing.py @@ -1,6 +1,6 @@ import pytest from utils.parametric.spec.trace import find_span, find_trace -from utils import missing_feature, bug, features, context, scenarios +from utils import missing_feature, features, context, scenarios @features.partial_flush @@ -13,8 +13,7 @@ class Test_Partial_Flushing: context.library == "java", reason="java uses '>' so it needs one more span to force a partial flush" ) def test_partial_flushing_one_span(self, test_agent, test_library): - """ - Create a trace with a root span and a single child. Finish the child, and ensure + """Create a trace with a root span and a single child. Finish the child, and ensure partial flushing triggers. This test explicitly enables partial flushing. """ do_partial_flush_test(self, test_agent, test_library) @@ -26,8 +25,7 @@ def test_partial_flushing_one_span(self, test_agent, test_library): @missing_feature(context.library == "golang", reason="partial flushing not enabled by default") @missing_feature(context.library == "dotnet", reason="partial flushing not enabled by default") def test_partial_flushing_one_span_default(self, test_agent, test_library): - """ - Create a trace with a root span and a single child. Finish the child, and ensure + """Create a trace with a root span and a single child. Finish the child, and ensure partial flushing triggers. This test assumes partial flushing is enabled by default. """ do_partial_flush_test(self, test_agent, test_library) @@ -36,8 +34,7 @@ def test_partial_flushing_one_span_default(self, test_agent, test_library): "library_env", [{"DD_TRACE_PARTIAL_FLUSH_MIN_SPANS": "5", "DD_TRACE_PARTIAL_FLUSH_ENABLED": "true"}] ) def test_partial_flushing_under_limit_one_payload(self, test_agent, test_library): - """ - Create a trace with a root span and a single child. Finish the child, and ensure + """Create a trace with a root span and a single child. Finish the child, and ensure partial flushing does NOT trigger, since the partial flushing min spans is set to 5. """ no_partial_flush_test(self, test_agent, test_library) @@ -46,16 +43,14 @@ def test_partial_flushing_under_limit_one_payload(self, test_agent, test_library "library_env", [{"DD_TRACE_PARTIAL_FLUSH_MIN_SPANS": "1", "DD_TRACE_PARTIAL_FLUSH_ENABLED": "false"}] ) def test_partial_flushing_disabled(self, test_agent, test_library): - """ - Create a trace with a root span and a single child. Finish the child, and ensure + """Create a trace with a root span and a single child. Finish the child, and ensure partial flushing does NOT trigger, since it's explicitly disabled. """ no_partial_flush_test(self, test_agent, test_library) def do_partial_flush_test(self, test_agent, test_library): - """ - Create a trace with a root span and a single child. Finish the child, and ensure + """Create a trace with a root span and a single child. Finish the child, and ensure partial flushing triggers. 
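    (A hedged summary of the mechanism under test: with partial flushing on, the tracer may flush finished spans of a still-open trace once the count of finished-but-unsent spans reaches DD_TRACE_PARTIAL_FLUSH_MIN_SPANS, instead of waiting for the root span to finish. With DD_TRACE_PARTIAL_FLUSH_MIN_SPANS=1, finishing the single child should therefore yield one payload containing only the child, followed later by a payload with the root.)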
""" with test_library: @@ -79,8 +74,7 @@ def do_partial_flush_test(self, test_agent, test_library): def no_partial_flush_test(self, test_agent, test_library): - """ - Create a trace with a root span and one child. Finish the child, and ensure + """Create a trace with a root span and one child. Finish the child, and ensure partial flushing does NOT trigger. """ with test_library: diff --git a/tests/parametric/test_sampling_span_tags.py b/tests/parametric/test_sampling_span_tags.py index d08c0b4952..8a577dea1b 100644 --- a/tests/parametric/test_sampling_span_tags.py +++ b/tests/parametric/test_sampling_span_tags.py @@ -1,20 +1,20 @@ import json import pytest -from utils import bug, context, scenarios, features # noqa -from utils.parametric.spec.trace import MANUAL_DROP_KEY # noqa -from utils.parametric.spec.trace import MANUAL_KEEP_KEY # noqa -from utils.parametric.spec.trace import SAMPLING_AGENT_PRIORITY_RATE # noqa -from utils.parametric.spec.trace import SAMPLING_DECISION_MAKER_KEY # noqa -from utils.parametric.spec.trace import SAMPLING_LIMIT_PRIORITY_RATE # noqa -from utils.parametric.spec.trace import SAMPLING_PRIORITY_KEY # noqa -from utils.parametric.spec.trace import SAMPLING_RULE_PRIORITY_RATE # noqa -from utils.parametric.spec.trace import find_span_in_traces # noqa +from utils import bug, context, scenarios, features +from utils.parametric.spec.trace import MANUAL_DROP_KEY +from utils.parametric.spec.trace import MANUAL_KEEP_KEY +from utils.parametric.spec.trace import SAMPLING_AGENT_PRIORITY_RATE +from utils.parametric.spec.trace import SAMPLING_DECISION_MAKER_KEY +from utils.parametric.spec.trace import SAMPLING_LIMIT_PRIORITY_RATE +from utils.parametric.spec.trace import SAMPLING_PRIORITY_KEY +from utils.parametric.spec.trace import SAMPLING_RULE_PRIORITY_RATE +from utils.parametric.spec.trace import find_span_in_traces UNSET = -420 -class AnyRatio(object): +class AnyRatio: def __eq__(self, other): return 0 <= other <= 1 diff --git a/tests/parametric/test_span_events.py b/tests/parametric/test_span_events.py index 7959587fe2..51199e640b 100644 --- a/tests/parametric/test_span_events.py +++ b/tests/parametric/test_span_events.py @@ -2,7 +2,6 @@ import pytest from utils import scenarios, missing_feature, features, rfc -from utils.parametric._library_client import Event from utils.parametric.spec.trace import find_span, find_trace diff --git a/tests/parametric/test_span_links.py b/tests/parametric/test_span_links.py index 7de2bcfc09..07073edfdf 100644 --- a/tests/parametric/test_span_links.py +++ b/tests/parametric/test_span_links.py @@ -6,12 +6,12 @@ from utils.parametric.spec.trace import AUTO_DROP_KEY from utils.parametric.spec.trace import span_has_no_parent from utils.parametric.spec.tracecontext import TRACECONTEXT_FLAGS_SET -from utils import scenarios, missing_feature -from utils.parametric._library_client import Link +from utils import scenarios, missing_feature, features from utils.parametric.spec.trace import retrieve_span_links, find_span, find_trace, find_span_in_traces @scenarios.parametric +@features.span_links class Test_Span_Links: @pytest.mark.parametrize("library_env", [{"DD_TRACE_API_VERSION": "v0.4"}]) @missing_feature(library="nodejs", reason="only supports span links encoding through _dd.span_links tag") diff --git a/tests/parametric/test_span_sampling.py b/tests/parametric/test_span_sampling.py index ce9ad4e766..971b96d62d 100644 --- a/tests/parametric/test_span_sampling.py +++ b/tests/parametric/test_span_sampling.py @@ -22,13 +22,15 @@ class 
Test_Span_Sampling: { "DD_SPAN_SAMPLING_RULES": json.dumps([{"service": "webserver", "name": "web.request"}]), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', } ], ) def test_single_rule_match_span_sampling_sss001(self, test_agent, test_library): """Test that span sampling tags are added when both: 1. a span sampling rule matches - 2. tracer is set to drop the trace manually""" + 2. tracer is set to drop the trace manually + """ with test_library: with test_library.dd_start_span(name="web.request", service="webserver") as span: pass @@ -45,6 +47,7 @@ def test_single_rule_match_span_sampling_sss001(self, test_agent, test_library): { "DD_SPAN_SAMPLING_RULES": json.dumps([{"service": "webse*", "name": "web.re?uest"}]), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', } ], ) @@ -65,6 +68,7 @@ def test_special_glob_characters_span_sampling_sss002(self, test_agent, test_lib { "DD_SPAN_SAMPLING_RULES": json.dumps([{"service": "notmatching", "name": "notmatching"}]), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', } ], ) @@ -84,7 +88,14 @@ def test_single_rule_no_match_span_sampling_sss003(self, test_agent, test_librar @missing_feature(context.library == "ruby", reason="Issue: _dd.span_sampling.max_per_second is always set in Ruby") @pytest.mark.parametrize( - "library_env", [{"DD_SPAN_SAMPLING_RULES": json.dumps([{"service": "webserver"}]), "DD_TRACE_SAMPLE_RATE": 0}] + "library_env", + [ + { + "DD_SPAN_SAMPLING_RULES": json.dumps([{"service": "webserver"}]), + "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', + } + ], ) def test_single_rule_only_service_pattern_match_span_sampling_sss004(self, test_agent, test_library): """Test span sampling tags are added when both: @@ -100,7 +111,14 @@ def test_single_rule_only_service_pattern_match_span_sampling_sss004(self, test_ assert span["metrics"].get(SINGLE_SPAN_SAMPLING_MAX_PER_SEC) is None @pytest.mark.parametrize( - "library_env", [{"DD_SPAN_SAMPLING_RULES": json.dumps([{"name": "no_match"}]), "DD_TRACE_SAMPLE_RATE": 0}] + "library_env", + [ + { + "DD_SPAN_SAMPLING_RULES": json.dumps([{"name": "no_match"}]), + "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', + } + ], ) def test_single_rule_only_name_pattern_no_match_span_sampling_sss005(self, test_agent, test_library): """Test span sampling tags are not added when: @@ -127,6 +145,7 @@ def test_single_rule_only_name_pattern_no_match_span_sampling_sss005(self, test_ ] ), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', } ], ) @@ -159,6 +178,7 @@ def test_multi_rule_keep_drop_span_sampling_sss006(self, test_agent, test_librar ] ), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', } ], ) @@ -192,6 +212,7 @@ def test_multi_rule_drop_keep_span_sampling_sss007(self, test_agent, test_librar [{"service": "webserver", "name": "web.request", "max_per_second": 2}] ), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', } ], ) @@ -246,6 +267,7 @@ def test_single_rule_rate_limiter_span_sampling_sss008(self, test_agent, test_li [{"service": "webserver", "name": "web.request", "sample_rate": 0.5}] ), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', } ], ) @@ -313,6 +335,7 @@ def test_sampling_rate_not_absolute_value_sss009(self, test_agent, test_library) { "DD_SPAN_SAMPLING_RULES": json.dumps([{"service": "webserver", "name": "web.request"}]), 
"DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', "DD_TRACE_STATS_COMPUTATION_ENABLED": "True", } ], @@ -344,13 +367,15 @@ def test_keep_span_with_stats_computation_sss010(self, test_agent, test_library) [{"service": "webserver", "name": "web.request", "sample_rate": 1.0}] ), "DD_TRACE_SAMPLE_RATE": 1.0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":1.0}]', } ], ) def test_single_rule_always_keep_span_sampling_sss011(self, test_agent, test_library): """Test that spans are always kept when the sampling rule matches and has sample_rate:1.0 regardless of tracer decision. - Basically, if we have a rule for spans with sample_rate:1.0 we should always keep those spans, either due to trace sampling or span sampling""" + Basically, if we have a rule for spans with sample_rate:1.0 we should always keep those spans, either due to trace sampling or span sampling + """ # This span is set to be dropped by the tracer/user, however it is kept by span sampling with test_library: with test_library.dd_start_span(name="web.request", service="webserver") as s1: @@ -380,6 +405,7 @@ def test_single_rule_always_keep_span_sampling_sss011(self, test_agent, test_lib [{"service": "webserver", "name": "web.request", "sample_rate": 0}] ), "DD_TRACE_SAMPLE_RATE": 1.0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":1.0}]', } ], ) @@ -417,6 +443,7 @@ def test_single_rule_tracer_always_keep_span_sampling_sss012(self, test_agent, t ] ), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', } ], ) @@ -482,6 +509,7 @@ def test_multi_rule_independent_rate_limiters_sss013(self, test_agent, test_libr [{"service": "webserver", "name": "parent", "sample_rate": 1.0, "max_per_second": 50}] ), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', } ], ) @@ -524,6 +552,7 @@ def test_root_span_selected_by_sss014(self, test_agent, test_library): [{"service": "webserver", "name": "child", "sample_rate": 1.0, "max_per_second": 50}] ), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', } ], ) @@ -570,6 +599,7 @@ def test_child_span_selected_by_sss015(self, test_agent, test_library): [{"service": "webserver", "name": "parent", "sample_rate": 1.0, "max_per_second": 50}] ), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', "DD_TRACE_TRACER_METRICS_ENABLED": "true", # This activates dropping policy for Java Tracer "DD_TRACE_FEATURES": "discovery", # This activates dropping policy for Go Tracer } @@ -584,10 +614,10 @@ def test_root_span_selected_and_child_dropped_by_sss_when_dropping_policy_is_act We're essentially testing to make sure that the child unsampled span is dropped on the tracer side because of the activate dropping policy. 
""" - assert test_agent.info()["client_drop_p0s"] == True, "Client drop p0s expected to be enabled" + assert test_agent.info()["client_drop_p0s"] is True, "Client drop p0s expected to be enabled" with test_library: - with test_library.dd_start_span(name="parent", service="webserver") as s1: + with test_library.dd_start_span(name="parent", service="webserver"): pass # expect the first trace kept by the tracer despite of the active dropping policy because of SSS @@ -627,6 +657,7 @@ def test_root_span_selected_and_child_dropped_by_sss_when_dropping_policy_is_act [{"service": "webserver", "name": "child", "sample_rate": 1.0, "max_per_second": 50}] ), "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', "DD_TRACE_TRACER_METRICS_ENABLED": "true", # This activates dropping policy for Java Tracer "DD_TRACE_FEATURES": "discovery", # This activates dropping policy for Go Tracer } @@ -641,11 +672,11 @@ def test_child_span_selected_and_root_dropped_by_sss_when_dropping_policy_is_act We're essentially testing to make sure that the root unsampled span is dropped on the tracer side because of the activate dropping policy. """ - assert test_agent.info()["client_drop_p0s"] == True, "Client drop p0s expected to be enabled" + assert test_agent.info()["client_drop_p0s"] is True, "Client drop p0s expected to be enabled" with test_library: with test_library.dd_start_span(name="parent", service="webserver") as ps1: - with test_library.dd_start_span(name="child", service="webserver", parent_id=ps1.span_id) as cs1: + with test_library.dd_start_span(name="child", service="webserver", parent_id=ps1.span_id): pass # expect the first trace kept by the tracer despite of the active dropping policy because of SSS @@ -682,6 +713,7 @@ def test_child_span_selected_and_root_dropped_by_sss_when_dropping_policy_is_act [ { "DD_TRACE_SAMPLE_RATE": 0, + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0}]', "DD_TRACE_TRACER_METRICS_ENABLED": "true", # This activates dropping policy for Java Tracer "DD_TRACE_FEATURES": "discovery", # This activates dropping policy for Go Tracer } @@ -694,7 +726,7 @@ def test_entire_trace_dropped_when_dropping_policy_is_active018(self, test_agent We're essentially testing to make sure that the entire unsampled trace is dropped on the tracer side because of the activate dropping policy. """ - assert test_agent.info()["client_drop_p0s"] == True, "Client drop p0s expected to be enabled" + assert test_agent.info()["client_drop_p0s"] is True, "Client drop p0s expected to be enabled" with test_library: with test_library.dd_start_span(name="parent", service="webserver"): diff --git a/tests/parametric/test_telemetry.py b/tests/parametric/test_telemetry.py index 65b7490512..c76ef341e4 100644 --- a/tests/parametric/test_telemetry.py +++ b/tests/parametric/test_telemetry.py @@ -1,19 +1,15 @@ -""" -Test the telemetry that should be emitted from the library. -""" +"""Test the telemetry that should be emitted from the library.""" import base64 import copy import json import time -from typing import Any import uuid import pytest -from typing import List, Optional from utils.telemetry_utils import TelemetryUtils -from utils import context, scenarios, rfc, features, missing_feature, bug +from utils import context, scenarios, rfc, features, missing_feature telemetry_name_mapping = { @@ -92,26 +88,27 @@ def test_library_settings(self, library_env, test_agent, test_library): # The Go tracer does not support logs injection. 
if context.library == "golang" and apm_telemetry_name in ("logs_injection_enabled",): continue - if context.library == "cpp": - unsupported_fields = ( - "logs_injection_enabled", - "trace_header_tags", - "profiling_enabled", - "appsec_enabled", - "data_streams_enabled", - "trace_sample_rate", - ) - if apm_telemetry_name in unsupported_fields: - continue + if context.library == "cpp" and apm_telemetry_name in ( + "logs_injection_enabled", + "trace_header_tags", + "profiling_enabled", + "appsec_enabled", + "data_streams_enabled", + "trace_sample_rate", + ): + continue + if context.library == "python" and apm_telemetry_name in ("trace_sample_rate",): + # DD_TRACE_SAMPLE_RATE is not supported in ddtrace>=3.x + continue apm_telemetry_name = _mapped_telemetry_name(context, apm_telemetry_name) cfg_item = configuration_by_name.get(apm_telemetry_name) - assert cfg_item is not None, "Missing telemetry config item for '{}'".format(apm_telemetry_name) + assert cfg_item is not None, f"Missing telemetry config item for '{apm_telemetry_name}'" if isinstance(value, tuple): - assert cfg_item.get("value") in value, "Unexpected value for '{}'".format(apm_telemetry_name) + assert cfg_item.get("value") in value, f"Unexpected value for '{apm_telemetry_name}'" else: - assert cfg_item.get("value") == value, "Unexpected value for '{}'".format(apm_telemetry_name) - assert cfg_item.get("origin") == "default", "Unexpected origin for '{}'".format(apm_telemetry_name) + assert cfg_item.get("value") == value, f"Unexpected value for '{apm_telemetry_name}'" + assert cfg_item.get("origin") == "default", f"Unexpected origin for '{apm_telemetry_name}'" @scenarios.parametric @@ -249,36 +246,35 @@ def test_library_settings(self, library_env, test_agent, test_library): # The Go tracer does not support logs injection. 
if context.library == "golang" and apm_telemetry_name in ("logs_injection_enabled",): continue - if context.library == "cpp": - unsupported_fields = ( - "logs_injection_enabled", - "trace_header_tags", - "profiling_enabled", - "appsec_enabled", - "data_streams_enabled", - ) - if apm_telemetry_name in unsupported_fields: - continue + if context.library == "cpp" and apm_telemetry_name in ( + "logs_injection_enabled", + "trace_header_tags", + "profiling_enabled", + "appsec_enabled", + "data_streams_enabled", + ): + continue + if context.library == "python" and apm_telemetry_name in ("trace_sample_rate",): + # DD_TRACE_SAMPLE_RATE is not supported in ddtrace>=3.x + continue apm_telemetry_name = _mapped_telemetry_name(context, apm_telemetry_name) cfg_item = configuration_by_name.get(apm_telemetry_name) - assert cfg_item is not None, "Missing telemetry config item for '{}'".format(apm_telemetry_name) + assert cfg_item is not None, f"Missing telemetry config item for '{apm_telemetry_name}'" if isinstance(environment_value, tuple): - assert cfg_item.get("value") in environment_value, "Unexpected value for '{}'".format( - apm_telemetry_name - ) + assert cfg_item.get("value") in environment_value, f"Unexpected value for '{apm_telemetry_name}'" else: - assert cfg_item.get("value") == environment_value, "Unexpected value for '{}'".format( - apm_telemetry_name - ) - assert cfg_item.get("origin") == "env_var", "Unexpected origin for '{}'".format(apm_telemetry_name) + assert cfg_item.get("value") == environment_value, f"Unexpected value for '{apm_telemetry_name}'" + assert cfg_item.get("origin") == "env_var", f"Unexpected origin for '{apm_telemetry_name}'" @missing_feature(context.library == "dotnet", reason="Not implemented") @missing_feature(context.library == "java", reason="Not implemented") @missing_feature(context.library == "ruby", reason="Not implemented") @missing_feature(context.library == "php", reason="Not implemented") @missing_feature(context.library == "cpp", reason="Not implemented") - @missing_feature(context.library < "python@2.18.0.dev", reason="Not implemented") + @missing_feature( + context.library <= "python@3.1.0", reason="OTEL Sampling config is mapped to a different datadog config" + ) @pytest.mark.parametrize( "library_env", [ @@ -292,6 +288,8 @@ def test_library_settings(self, library_env, test_agent, test_library): "DD_TRACE_LOG_LEVEL": "error", "DD_LOG_LEVEL": "error", "OTEL_LOG_LEVEL": "debug", + # python tracer supports DD_TRACE_SAMPLING_RULES not DD_TRACE_SAMPLE_RATE + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.5}]', "DD_TRACE_SAMPLE_RATE": "0.5", "OTEL_TRACES_SAMPLER": "traceidratio", "OTEL_TRACES_SAMPLER_ARG": "0.1", @@ -332,16 +330,21 @@ def test_telemetry_otel_env_hiding(self, library_env, test_agent, test_library): else: otelsampler_config = "otel_traces_sampler_arg" - dd_to_otel_mapping: List[List[Optional[str]]] = [ + if context.library == "python": + ddsampling_config = "dd_trace_sampling_rules" + else: + ddsampling_config = "dd_trace_sample_rate" + + dd_to_otel_mapping: list[list[str | None]] = [ ["dd_trace_propagation_style", "otel_propagators"], ["dd_service", "otel_service_name"], - ["dd_trace_sample_rate", "otel_traces_sampler"], + [ddsampling_config, "otel_traces_sampler"], ["dd_trace_enabled", "otel_traces_exporter"], ["dd_runtime_metrics_enabled", "otel_metrics_exporter"], ["dd_tags", "otel_resource_attributes"], ["dd_trace_otel_enabled", "otel_sdk_disabled"], [ddlog_config, "otel_log_level"], - ["dd_trace_sample_rate", otelsampler_config], + 
[ddsampling_config, otelsampler_config], ] for dd_config, otel_config in dd_to_otel_mapping: @@ -362,7 +365,9 @@ def test_telemetry_otel_env_hiding(self, library_env, test_agent, test_library): @missing_feature(context.library == "ruby", reason="Not implemented") @missing_feature(context.library == "php", reason="Not implemented") @missing_feature(context.library == "cpp", reason="Not implemented") - @missing_feature(context.library < "python@2.18.0.dev", reason="Not implemented") + @missing_feature( + context.library <= "python@3.1.0", reason="OTEL Sampling config is mapped to a different datadog config" + ) @missing_feature( context.library == "nodejs", reason="does not collect otel_env.invalid metrics for otel_resource_attributes" ) @@ -413,15 +418,20 @@ def test_telemetry_otel_env_invalid(self, library_env, test_agent, test_library) else: otelsampler_config = "otel_traces_sampler_arg" - dd_to_otel_mapping: List[List[Optional[str]]] = [ + if context.library == "python": + ddsampling_config = "dd_trace_sampling_rules" + else: + ddsampling_config = "dd_trace_sample_rate" + + dd_to_otel_mapping: list[list[str | None]] = [ ["dd_trace_propagation_style", "otel_propagators"], - ["dd_trace_sample_rate", "otel_traces_sampler"], + [ddsampling_config, "otel_traces_sampler"], ["dd_trace_enabled", "otel_traces_exporter"], ["dd_runtime_metrics_enabled", "otel_metrics_exporter"], ["dd_tags", "otel_resource_attributes"], ["dd_trace_otel_enabled", "otel_sdk_disabled"], [ddlog_config, "otel_log_level"], - ["dd_trace_sample_rate", otelsampler_config], + [ddsampling_config, otelsampler_config], [None, "otel_logs_exporter"], ] @@ -448,9 +458,7 @@ def test_telemetry_otel_env_invalid(self, library_env, test_agent, test_library) @scenarios.parametric @features.telemetry_app_started_event class Test_TelemetryInstallSignature: - """ - This telemetry provides insights into how a library was installed. - """ + """This telemetry provides insights into how a library was installed.""" @pytest.mark.parametrize( "library_env", @@ -484,7 +492,7 @@ def test_telemetry_event_propagated(self, library_env, test_agent, test_library) continue assert ( "install_signature" in body["payload"] - ), "The install signature should be included in the telemetry event, got {}".format(body) + ), f"The install signature should be included in the telemetry event, got {body}" assert ( "install_id" in body["payload"]["install_signature"] ), "The install id should be included in the telemetry event, got {}".format( @@ -510,9 +518,8 @@ def test_telemetry_event_propagated(self, library_env, test_agent, test_library) @pytest.mark.parametrize("library_env", [{**DEFAULT_ENVVARS}]) def test_telemetry_event_not_propagated(self, library_env, test_agent, test_library): - """ - When instrumentation data is not propagated to the library - The telemetry event should not contain telemetry as the Agent will add it when not present. + """When instrumentation data is not propagated to the library + The telemetry event should not contain the install signature, as the Agent will add it when not present. """ # Some libraries require a first span for telemetry to be emitted.
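A hedged sketch of the "first span" warm-up that this comment refers to, and of the assertion the test then performs (wait_for_telemetry_event is assumed to be the test agent helper used by these telemetry tests; treat the exact signature as an assumption):

    with test_library:
        with test_library.dd_start_span(name="warmup"):
            pass
    event = test_agent.wait_for_telemetry_event("app-started")
    assert "install_signature" not in event["payload"]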
@@ -527,16 +534,14 @@ def test_telemetry_event_not_propagated(self, library_env, test_agent, test_libr if "payload" in body: assert ( "install_signature" not in body["payload"] - ), "The install signature should not be included in the telemetry event, got {}".format(body) + ), f"The install signature should not be included in the telemetry event, got {body}" @rfc("https://docs.google.com/document/d/1xTLC3UEGNooZS0YOYp3swMlAhtvVn1aa639TGxHHYvg/edit") @scenarios.parametric @features.telemetry_app_started_event class Test_TelemetrySCAEnvVar: - """ - This telemetry entry has the value of DD_APPSEC_SCA_ENABLED in the library. - """ + """This telemetry entry has the value of DD_APPSEC_SCA_ENABLED in the library.""" @staticmethod def flatten_message_batch(requests): @@ -570,12 +575,11 @@ def get_app_started_configuration_by_name(test_agent, test_library): assert ( "configuration" in body["payload"] - ), "The configuration should be included in the telemetry event, got {}".format(body) + ), f"The configuration should be included in the telemetry event, got {body}" configuration = body["payload"]["configuration"] - configuration_by_name = {item["name"]: item for item in configuration} - return configuration_by_name + return {item["name"]: item for item in configuration} return None @@ -602,7 +606,7 @@ def test_telemetry_sca_enabled_propagated( DD_APPSEC_SCA_ENABLED = TelemetryUtils.get_dd_appsec_sca_enabled_str(context.library) cfg_appsec_enabled = configuration_by_name.get(DD_APPSEC_SCA_ENABLED) - assert cfg_appsec_enabled is not None, "Missing telemetry config item for '{}'".format(DD_APPSEC_SCA_ENABLED) + assert cfg_appsec_enabled is not None, f"Missing telemetry config item for '{DD_APPSEC_SCA_ENABLED}'" if context.library == "java": outcome_value = str(outcome_value).lower() @@ -620,9 +624,7 @@ def test_telemetry_sca_enabled_not_propagated(self, library_env, test_agent, tes if context.library in ("java", "nodejs", "python"): cfg_appsec_enabled = configuration_by_name.get(DD_APPSEC_SCA_ENABLED) - assert cfg_appsec_enabled is not None, "Missing telemetry config item for '{}'".format( - DD_APPSEC_SCA_ENABLED - ) + assert cfg_appsec_enabled is not None, f"Missing telemetry config item for '{DD_APPSEC_SCA_ENABLED}'" assert cfg_appsec_enabled.get("value") is None else: - assert DD_APPSEC_SCA_ENABLED not in configuration_by_name.keys() + assert DD_APPSEC_SCA_ENABLED not in configuration_by_name diff --git a/tests/parametric/test_trace_sampling.py b/tests/parametric/test_trace_sampling.py index 1697327a6a..32a1d72dda 100644 --- a/tests/parametric/test_trace_sampling.py +++ b/tests/parametric/test_trace_sampling.py @@ -80,7 +80,7 @@ def test_trace_dropped_by_trace_sampling_rule(self, test_agent, test_library): { "DD_TRACE_SAMPLE_RATE": 1, "DD_TRACE_SAMPLING_RULES": json.dumps( - [{"service": "webserver", "resource": "drop-me", "sample_rate": 0}] + [{"service": "webserver", "resource": "drop-me", "sample_rate": 0}, {"sample_rate": 1}] ), } ], @@ -91,7 +91,6 @@ def test_trace_kept_in_spite_trace_sampling_rule(self, test_agent, test_library) with test_library.dd_start_span(name="web.request", service="webserver") as s1: s1.set_metric("sampling.priority", 2) s1.set_meta("resource.name", "drop-me") - pass span = find_only_span(test_agent.wait_for_num_traces(1)) assert span["metrics"].get(SAMPLING_PRIORITY_KEY) == 2 @@ -433,7 +432,9 @@ def test_trace_dropped_by_trace_sampling_rule_tags(self, test_agent, test_librar def tag_sampling_env(tag_glob_pattern): return { "DD_TRACE_SAMPLE_RATE": 0, - 
"DD_TRACE_SAMPLING_RULES": json.dumps([{"tags": {"tag": tag_glob_pattern}, "sample_rate": 1.0}]), + "DD_TRACE_SAMPLING_RULES": json.dumps( + [{"tags": {"tag": tag_glob_pattern}, "sample_rate": 1.0}, {"sample_rate": 0}] + ), } diff --git a/tests/parametric/test_tracer.py b/tests/parametric/test_tracer.py index d0ec2131e6..c71da9533c 100644 --- a/tests/parametric/test_tracer.py +++ b/tests/parametric/test_tracer.py @@ -1,5 +1,3 @@ -from typing import Dict - import pytest from utils.parametric.spec.trace import find_trace @@ -16,6 +14,7 @@ @scenarios.parametric +@features.trace_annotation class Test_Tracer: @missing_feature(context.library == "cpp", reason="metrics cannot be set manually") @missing_feature(context.library == "nodejs", reason="nodejs overrides the manually set service name") @@ -50,13 +49,12 @@ def test_tracer_span_top_level_attributes(self, test_agent: _TestAgentAPI, test_ class Test_TracerSCITagging: @parametrize("library_env", [{"DD_GIT_REPOSITORY_URL": "https://github.com/DataDog/dd-trace-go"}]) def test_tracer_repository_url_environment_variable( - self, library_env: Dict[str, str], test_agent: _TestAgentAPI, test_library: APMLibrary + self, library_env: dict[str, str], test_agent: _TestAgentAPI, test_library: APMLibrary ) -> None: - """ - When DD_GIT_REPOSITORY_URL is specified - When a trace chunk is emitted - The first span of the trace chunk should have the value of DD_GIT_REPOSITORY_URL - in meta._dd.git.repository_url + """When DD_GIT_REPOSITORY_URL is specified + When a trace chunk is emitted + The first span of the trace chunk should have the value of DD_GIT_REPOSITORY_URL + in meta._dd.git.repository_url """ with test_library: with test_library.dd_start_span("operation") as parent: @@ -76,13 +74,12 @@ def test_tracer_repository_url_environment_variable( @parametrize("library_env", [{"DD_GIT_COMMIT_SHA": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}]) def test_tracer_commit_sha_environment_variable( - self, library_env: Dict[str, str], test_agent: _TestAgentAPI, test_library: APMLibrary + self, library_env: dict[str, str], test_agent: _TestAgentAPI, test_library: APMLibrary ) -> None: - """ - When DD_GIT_COMMIT_SHA is specified - When a trace chunk is emitted - The first span of the trace chunk should have the value of DD_GIT_COMMIT_SHA - in meta._dd.git.commit.sha + """When DD_GIT_COMMIT_SHA is specified + When a trace chunk is emitted + The first span of the trace chunk should have the value of DD_GIT_COMMIT_SHA + in meta._dd.git.commit.sha """ with test_library: with test_library.dd_start_span("operation") as parent: @@ -136,13 +133,12 @@ def test_tracer_commit_sha_environment_variable( ) @missing_feature(context.library == "nodejs", reason="nodejs does not strip credentials yet") def test_tracer_repository_url_strip_credentials( - self, library_env: Dict[str, str], test_agent: _TestAgentAPI, test_library: APMLibrary + self, library_env: dict[str, str], test_agent: _TestAgentAPI, test_library: APMLibrary ) -> None: - """ - When DD_GIT_REPOSITORY_URL is specified - When a trace chunk is emitted - The first span of the trace chunk should have the value of DD_GIT_REPOSITORY_URL - in meta._dd.git.repository_url, with credentials removed if any + """When DD_GIT_REPOSITORY_URL is specified + When a trace chunk is emitted + The first span of the trace chunk should have the value of DD_GIT_REPOSITORY_URL + in meta._dd.git.repository_url, with credentials removed if any """ with test_library: with test_library.dd_start_span("operation") as parent: # type: ignore @@ 
-157,16 +153,16 @@ def test_tracer_repository_url_strip_credentials( @scenarios.parametric +@features.dd_service_mapping class Test_TracerUniversalServiceTagging: @missing_feature(reason="FIXME: library test client sets empty string as the service name") @parametrize("library_env", [{"DD_SERVICE": "service1"}]) def test_tracer_service_name_environment_variable( - self, library_env: Dict[str, str], test_agent: _TestAgentAPI, test_library: APMLibrary + self, library_env: dict[str, str], test_agent: _TestAgentAPI, test_library: APMLibrary ) -> None: - """ - When DD_SERVICE is specified - When a span is created - The span should use the value of DD_SERVICE for span.service + """When DD_SERVICE is specified + When a span is created + The span should use the value of DD_SERVICE for span.service """ with test_library: with test_library.dd_start_span("operation") as root: @@ -181,12 +177,11 @@ def test_tracer_service_name_environment_variable( @parametrize("library_env", [{"DD_ENV": "prod"}]) def test_tracer_env_environment_variable( - self, library_env: Dict[str, str], test_agent: _TestAgentAPI, test_library: APMLibrary + self, library_env: dict[str, str], test_agent: _TestAgentAPI, test_library: APMLibrary ) -> None: - """ - When DD_ENV is specified - When a span is created - The span should have the value of DD_ENV in meta.env + """When DD_ENV is specified + When a span is created + The span should have the value of DD_ENV in meta.env """ with test_library: with test_library.dd_start_span("operation") as root: diff --git a/tests/parametric/test_tracer_flare.py b/tests/parametric/test_tracer_flare.py index e019f0adba..c76bfcf3ad 100644 --- a/tests/parametric/test_tracer_flare.py +++ b/tests/parametric/test_tracer_flare.py @@ -1,17 +1,15 @@ -""" -Test the tracer flare feature of the APM libraries. 
-""" +"""Test the tracer flare feature of the APM libraries.""" import json import zipfile from base64 import b64decode from io import BytesIO -from typing import Any, Dict, List, Set +from typing import Any from uuid import uuid4 import pytest -from utils import rfc, scenarios, features, missing_feature, context, bug +from utils import rfc, scenarios, features, missing_feature, context from utils.dd_constants import RemoteConfigApplyState @@ -30,14 +28,14 @@ } -def _tracer_flare_task_config() -> Dict[str, Any]: +def _tracer_flare_task_config() -> dict[str, Any]: return { "args": {"case_id": "12345", "hostname": "my.hostname", "user_handle": "its.me@datadoghq.com"}, "task_type": "tracer_flare", } -def _flare_log_level_order() -> Dict[str, Any]: +def _flare_log_level_order() -> dict[str, Any]: return { "order": [], "internal_order": [ @@ -52,12 +50,11 @@ def _flare_log_level_order() -> Dict[str, Any]: } -def _java_tracer_flare_filenames() -> Set: +def _java_tracer_flare_filenames() -> set: return { "classpath.txt", "flare_info.txt", "dynamic_config.txt", - "flare_info.txt", "initial_config.txt", "instrumenter_metrics.txt", "instrumenter_state.txt", @@ -89,7 +86,7 @@ def _clear_log_level(test_agent, cfg_id: str) -> None: ) -def _add_task(test_agent, task_config: Dict[str, Any]) -> int: +def _add_task(test_agent, task_config: dict[str, Any]) -> int: """Helper to create an agent task in RC with the given task arguments.""" task_config["uuid"] = uuid4().hex task_id = hash(json.dumps(task_config)) @@ -106,7 +103,7 @@ def _clear_task(test_agent, task_id) -> None: ) -def trigger_tracer_flare_and_wait(test_agent, task_overrides: Dict[str, Any]) -> Dict: +def trigger_tracer_flare_and_wait(test_agent, task_overrides: dict[str, Any]) -> dict: """Creates a "trace_flare" agent task and waits for the tracer flare to be uploaded.""" task_config = _tracer_flare_task_config() task_args = task_config["args"] @@ -134,14 +131,14 @@ def assert_expected_files(content, min_files): def assert_java_log_file(content): flare_file = zipfile.ZipFile(BytesIO(b64decode(content))) - myfile = flare_file.open("tracer.log") + flare_file.open("tracer.log") # file content: 'No tracer log file specified and no prepare flare event received' assert flare_file.getinfo("tracer.log").file_size == 64, "tracer flare log file is not as expected" def assert_java_log_file_debug(content): flare_file = zipfile.ZipFile(BytesIO(b64decode(content))) - myfile = flare_file.open("tracer.log") + flare_file.open("tracer.log") assert flare_file.getinfo("tracer.log").file_size > 64, "tracer flare log file is not as expected" diff --git a/tests/perfs/process.py b/tests/perfs/process.py index 61dd6486d8..1bbdcd5b1f 100644 --- a/tests/perfs/process.py +++ b/tests/perfs/process.py @@ -4,7 +4,7 @@ from os import environ -LOG_FOLDER = environ["LOG_FOLDER"] if "LOG_FOLDER" in environ else "logs" +LOG_FOLDER = environ.get("LOG_FOLDER", "logs") LIBS = ("golang", "dotnet", "java", "nodejs", "php", "ruby") diff --git a/tests/perfs/test_performances.py b/tests/perfs/test_performances.py index a44dae914a..91d45ad8e6 100644 --- a/tests/perfs/test_performances.py +++ b/tests/perfs/test_performances.py @@ -6,7 +6,7 @@ import requests import docker -from utils import scenarios +from utils import scenarios, features MAX_CONCURRENT_REQUEST = 5 @@ -21,13 +21,14 @@ # WARMUP_LAST_SLEEP_DURATION = 1 # WEBLOG_URL="http://localhost:7777" @scenarios.performances +@features.not_reported class Test_Performances: def setup_main(self) -> None: - self.requests = [] + 
self.requests: list = [] self.build_requests() - self.results = [] - self.memory = [] + self.results: list = [] + self.memory: list = [] self.finished = False self.appsec = "with_appsec" if environ.get("DD_APPSEC_ENABLED") == "true" else "without_appsec" @@ -126,7 +127,7 @@ def watch_docker_target(self): print("MEM", datetime.now(), memory, flush=True) def test_main(self): - """add some tests ?""" + """Add some tests?""" with open( f"{scenarios.performances.host_log_folder}/stats_{self.lang}_{self.appsec}.json", "w", encoding="utf-8" diff --git a/tests/remote_config/test_remote_configuration.py b/tests/remote_config/test_remote_configuration.py index 5f60ea00c2..efea0d45c1 100644 --- a/tests/remote_config/test_remote_configuration.py +++ b/tests/remote_config/test_remote_configuration.py @@ -11,7 +11,6 @@ context, interfaces, irrelevant, - missing_feature, rfc, scenarios, weblog, @@ -22,6 +21,25 @@ from utils.tools import logger +@rfc("https://docs.google.com/document/d/1bUVtEpXNTkIGvLxzkNYCxQzP2X9EK9HMBLHWXr_5KLM/edit#heading=h.vy1jegxy7cuc") +@features.remote_config_object_supported +class Test_NoError: + """A library should apply all remote config payloads without error.""" + + def test_no_error(self): + def no_error(data): + config_states = ( + data.get("request", {}).get("content", {}).get("client", {}).get("state", {}).get("config_states", {}) + ) + + for state in config_states: + error = state.get("apply_error", None) + if error is not None: + raise Exception(f"Error in remote config application: {error}") + + interfaces.library.validate_remote_configuration(no_error, success_by_default=True) + + @rfc("https://docs.google.com/document/d/1u_G7TOr8wJX0dOM_zUDKuRJgxoJU_hVTd5SeaMucQUs/edit#heading=h.octuyiil30ph") @features.remote_config_object_supported class RemoteConfigurationFieldsBasicTests: @@ -55,7 +73,7 @@ def validator(data): def dict_is_included(sub_dict: dict, main_dict: dict): - """returns true if every field/values in sub_dict are in main_dict""" + """Returns true if every field/value in sub_dict is in main_dict""" for key, value in sub_dict.items(): if key not in main_dict or value != main_dict[key]: @@ -65,8 +83,7 @@ def dict_is_included(sub_dict: dict, main_dict: dict): def dict_is_in_array(needle: dict, haystack: list, allow_additional_fields=True): - """ - returns true is needle is contained in haystack. + """Returns true if needle is contained in haystack.
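    A quick illustration of the intended semantics (values chosen for the example; see the parameter note below):

        dict_is_in_array({"a": 1}, [{"a": 1, "b": 2}])   # True: extra fields in the item are allowed
        dict_is_in_array({"a": 1}, [{"b": 2}])           # False: no item contains "a"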
If allow_additional_fields is true, needle can contain fewer fields than the matching item in haystack """ @@ -159,7 +176,7 @@ def rc_check_request(data, expected, caching): raise ValidationError(f"{file} should not be in cached_target_files", extra_info=content) except Exception as e: e.args += (expected.get("test_description", "No description"),) - raise e + raise @rfc("https://docs.google.com/document/d/1u_G7TOr8wJX0dOM_zUDKuRJgxoJU_hVTd5SeaMucQUs/edit#heading=h.octuyiil30ph") @@ -187,7 +204,7 @@ def setup_tracer_update_sequence(self): @bug(library="golang", reason="APPSEC-56064") @bug(context.library < "java@1.13.0", reason="APMRP-360") def test_tracer_update_sequence(self): - """test update sequence, based on a scenario mocked in the proxy""" + """Test update sequence, based on a scenario mocked in the proxy""" with open("tests/remote_config/rc_expected_requests_asm_features.json", encoding="utf-8") as f: ASM_FEATURES_EXPECTED_REQUESTS = json.load(f) @@ -283,11 +300,11 @@ def setup_tracer_update_sequence(self): @bug(context.library < "java@1.13.0", reason="APMRP-360") def test_tracer_update_sequence(self): - """test update sequence, based on a scenario mocked in the proxy""" + """Test update sequence, based on a scenario mocked in the proxy""" # Index the request number by runtime ID so that we can support applications # that spawn multiple worker processes, each running its own RCM client. - request_number = defaultdict(int) + request_number: dict = defaultdict(int) with open("tests/remote_config/rc_expected_requests_live_debugging.json", encoding="utf-8") as f: LIVE_DEBUGGING_EXPECTED_REQUESTS = json.load(f) @@ -337,7 +354,7 @@ def setup_tracer_update_sequence(self): @bug(context.weblog_variant == "spring-boot-openliberty", reason="APPSEC-6721") @bug(context.library <= "java@1.12.1", reason="APMRP-360") def test_tracer_update_sequence(self): - """test update sequence, based on a scenario mocked in the proxy""" + """Test update sequence, based on a scenario mocked in the proxy""" self.assert_client_fields() @@ -367,8 +384,7 @@ def validate(data): @scenarios.remote_config_mocked_backend_asm_features_nocache @features.appsec_onboarding class Test_RemoteConfigurationUpdateSequenceFeaturesNoCache(RemoteConfigurationFieldsBasicTests): - """ - Tests that over a sequence of related updates, tracers follow the RFC for the Features product + """Tests that over a sequence of related updates, tracers follow the RFC for the Features product This test is not relevant for any tracers but C++ and Ruby (missing feature). It may never be used if those languages directly implement the cache feature.
@@ -385,7 +401,7 @@ def setup_tracer_update_sequence(self): remote_config.send_sequential_commands(payloads) def test_tracer_update_sequence(self): - """test update sequence, based on a scenario mocked in the proxy""" + """Test update sequence, based on a scenario mocked in the proxy""" with open("tests/remote_config/rc_expected_requests_asm_features.json", encoding="utf-8") as f: ASM_FEATURES_EXPECTED_REQUESTS = json.load(f) @@ -430,7 +446,7 @@ def setup_tracer_update_sequence(self): remote_config.send_sequential_commands(payloads) def test_tracer_update_sequence(self): - """test update sequence, based on a scenario mocked in the proxy""" + """Test update sequence, based on a scenario mocked in the proxy""" self.assert_client_fields() diff --git a/tests/serverless/span_pointers/utils.py b/tests/serverless/span_pointers/utils.py index e24f4aaf94..5acc9cd406 100644 --- a/tests/serverless/span_pointers/utils.py +++ b/tests/serverless/span_pointers/utils.py @@ -30,8 +30,7 @@ def standard_hashing_function(elements: list[bytes]) -> PointerHash: def make_single_span_link_validator( resource: str, pointer_kind: str, pointer_direction: str, pointer_hash: PointerHash ): - """ - Make a validator function for use with interfaces.library.validate_spans. + """Make a validator function for use with interfaces.library.validate_spans. The validator checks that there is one and only one span pointer for the pointer_kind and pointer_direction and that its hash matches the pointer_hash. @@ -41,13 +40,13 @@ def validator(span): logger.debug("checking span: %s", span) if "span_links" not in span: - return + return None if "resource" not in span: - return + return None if span["resource"] != resource: - return + return None found_matching = False diff --git a/tests/stats/test_stats.py b/tests/stats/test_stats.py index cf4214f703..0d694e38da 100644 --- a/tests/stats/test_stats.py +++ b/tests/stats/test_stats.py @@ -1,4 +1,4 @@ -from utils import interfaces, weblog, features, scenarios, missing_feature, context, bug, flaky +from utils import interfaces, weblog, features, scenarios, missing_feature, context, bug from utils.tools import logger """ @@ -10,7 +10,7 @@ - Must have `is_trace_root` on trace root - Must set peer tags - Must have span_kind - + Config: - apm_config.peer_tags_aggregation (we should see peer service tags and aggregation by them, note only works on client or producer kind) - apm_config.compute_stats_by_span_kind (span_kind will be set and we will calc stats on these spans even when not "top level") @@ -45,8 +45,8 @@ def test_client_stats(self): no_content_top_hits += s["TopLevelHits"] else: assert False, "Unexpected status code " + str(s["HTTPStatusCode"]) - assert "weblog" == s["Service"], "expect weblog as service" - assert "web" == s["Type"], "expect 'web' type" + assert s["Service"] == "weblog", "expect weblog as service" + assert s["Type"] == "web", "expect 'web' type" assert ( stats_count <= 4 ), "expect <= 4 stats" # Normally this is exactly 2 but under high load this can flake and result in additional payloads where hits are split across two payloads @@ -62,10 +62,11 @@ def test_client_stats(self): def test_is_trace_root(self): """Test IsTraceRoot presence in stats. Note: Once all tracers have implemented it and the test xpasses for all of them, we can move these - assertions to `test_client_stats` method.""" + assertions to `test_client_stats` method.
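        (Hedged background: in agent stats payloads, IsTraceRoot flags buckets aggregated from spans that are the root of their trace, which is why the single server span behind GET /stats-unique is expected to report IsTraceRoot == 1 together with SpanKind == "server".)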
+ """ for s in interfaces.agent.get_stats(resource="GET /stats-unique"): - assert 1 == s["IsTraceRoot"] - assert "server" == s["SpanKind"] + assert s["IsTraceRoot"] == 1 + assert s["SpanKind"] == "server" @scenarios.everything_disabled def test_disable(self): diff --git a/tests/telemetry_intake/static/config_norm_rules.json b/tests/telemetry_intake/static/config_norm_rules.json index 2ce4b994bd..b1e9150398 100644 --- a/tests/telemetry_intake/static/config_norm_rules.json +++ b/tests/telemetry_intake/static/config_norm_rules.json @@ -52,12 +52,12 @@ "DD_CIVISIBILITY_FORCE_AGENT_EVP_PROXY": "ci_visibility_force_agent_evp_proxy_enabled", "DD_CIVISIBILITY_GAC_INSTALL_ENABLED": "ci_visibility_gac_install_enabled", "DD_CIVISIBILITY_GIT_UPLOAD_ENABLED": "ci_visibility_git_upload_enabled", + "DD_CIVISIBILITY_IMPACTED_TESTS_DETECTION_ENABLED": "dd_civisibility_impacted_tests_detection_enabled", "DD_CIVISIBILITY_ITR_ENABLED": "ci_visibility_intelligent_test_runner_enabled", "DD_CIVISIBILITY_LOGS_ENABLED": "ci_visibility_logs_enabled", "DD_CIVISIBILITY_RUM_FLUSH_WAIT_MILLIS": "ci_visibility_rum_flush_wait_millis", "DD_CIVISIBILITY_TESTSSKIPPING_ENABLED": "ci_visibility_test_skipping_enabled", "DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT": "ci_visibility_total_flaky_retry_count", - "DD_CIVISIBILITY_IMPACTED_TESTS_DETECTION_ENABLED": "dd_civisibility_impacted_tests_detection_enabled", "DD_CODE_ORIGIN_FOR_SPANS_ENABLED": "code_origin_for_spans_enabled", "DD_CODE_ORIGIN_FOR_SPANS_MAX_USER_FRAMES": "code_origin_for_spans_max_user_frames", "DD_DATA_STREAMS_ENABLED": "data_streams_enabled", @@ -83,6 +83,7 @@ "DD_DYNAMIC_INSTRUMENTATION_MAX_TIME_TO_SERIALIZE": "dynamic_instrumentation_serialization_max_duration", "DD_DYNAMIC_INSTRUMENTATION_REDACTED_IDENTIFIERS": "dynamic_instrumentation_redacted_identifiers", "DD_DYNAMIC_INSTRUMENTATION_REDACTED_TYPES": "dynamic_instrumentation_redacted_types", + "DD_DYNAMIC_INSTRUMENTATION_REDACTION_EXCLUDED_IDENTIFIERS": "dynamic_instrumentation_redaction_excluded_identifiers", "DD_DYNAMIC_INSTRUMENTATION_SYMBOL_DATABASE_BATCH_SIZE_BYTES": "dynamic_instrumentation_symbol_database_batch_size_bytes", "DD_DYNAMIC_INSTRUMENTATION_SYMBOL_DATABASE_UPLOAD_ENABLED": "dynamic_instrumentation_symbol_database_upload_enabled", "DD_DYNAMIC_INSTRUMENTATION_UPLOAD_BATCH_SIZE": "dynamic_instrumentation_upload_batch_size", @@ -203,6 +204,7 @@ "DD_TRACE_BAGGAGE_MAX_ITEMS": "trace_baggage_max_items", "DD_TRACE_BATCH_INTERVAL": "trace_serialization_batch_interval", "DD_TRACE_BUFFER_SIZE": "trace_serialization_buffer_size", + "DD_TRACE_BYPASS_HTTP_REQUEST_URL_CACHING_ENABLED": "trace_bypass_http_request_url_caching_enabled", "DD_TRACE_CLIENT_IP_ENABLED": "trace_client_ip_enabled", "DD_TRACE_CLIENT_IP_HEADER": "trace_client_ip_header", "DD_TRACE_COMMANDS_COLLECTION_ENABLED": "trace_commands_collection_enabled", @@ -560,8 +562,8 @@ "enabled": "trace_enabled", "env": "env", "environment_fulltrust_appdomain": "environment_fulltrust_appdomain_enabled", - "exception_replay_capture_intermediate_spans_enabled": "dd_exception_replay_capture_intermediate_spans_enabled", "exception_debugging_enabled": "exception_replay_enabled", + "exception_replay_capture_intermediate_spans_enabled": "dd_exception_replay_capture_intermediate_spans_enabled", "exception_replay_capture_interval_seconds": "dd_exception_replay_capture_interval_seconds", "exception_replay_capture_max_frames": "dd_exception_replay_capture_max_frames", "exception_replay_enabled": "dd_exception_replay_enabled", @@ -647,6 +649,7 @@ 
"isGitUploadEnabled": "git_upload_enabled", "isIntelligentTestRunnerEnabled": "intelligent_test_runner_enabled", "isManualApiEnabled": "ci_visibility_manual_api_enabled", + "isServiceUserProvided": "ci_visibility_is_service_user_provided", "isTestDynamicInstrumentationEnabled": "ci_visibility_test_dynamic_instrumentation_enabled", "jmxfetch.check-period": "jmxfetch_check_period", "jmxfetch.enabled": "jmxfetch_enabled", @@ -941,15 +944,15 @@ "trace_agent_args": "agent_trace_agent_executable_args", "trace_agent_path": "agent_trace_agent_executable_path", "trace_agent_v0_5_enabled": "trace_agent_v0.5_enabled", - "trace_flush_interval": "flush_interval", - "trace_git_metadata_enabled": "git_metadata_enabled", - "trace_http_client_split-by-domain": "trace_http_client_split_by_domain", - "trace_http_client_tag_query-string": "trace_http_client_tag_query_string_enabled", "trace_cloud_payload_tagging_max-depth": "cloud_payload_tagging_max_depth", "trace_cloud_payload_tagging_max-tags": "cloud_payload_tagging_max_tags", "trace_cloud_payload_tagging_services": "cloud_payload_tagging_services", "trace_cloud_request_payload_tagging": "cloud_payload_tagging_requests_enabled", "trace_cloud_response_payload_tagging": "cloud_payload_tagging_responses_enabled", + "trace_flush_interval": "flush_interval", + "trace_git_metadata_enabled": "git_metadata_enabled", + "trace_http_client_split-by-domain": "trace_http_client_split_by_domain", + "trace_http_client_tag_query-string": "trace_http_client_tag_query_string_enabled", "trace_methods": "trace_methods", "trace_peer_service_component_overrides": "trace_peer_service_component_overrides", "trace_peerservicetaginterceptor_enabled": "trace_peer_service_tag_interceptor_enabled", diff --git a/tests/telemetry_intake/static/python_config_rules.json b/tests/telemetry_intake/static/python_config_rules.json index fe58cb47a2..ba6b4061ca 100644 --- a/tests/telemetry_intake/static/python_config_rules.json +++ b/tests/telemetry_intake/static/python_config_rules.json @@ -9,6 +9,7 @@ "DD_EXCEPTION_DEBUGGING_ENABLED": "dd_exception_debugging_enabled", "DD_INSTRUMENTATION_TELEMETRY_ENABLED": "instrumentation_telemetry_enabled", "DD_INTERNAL_RCM_POLL_INTERVAL": "remote_config_poll_interval", + "DD_LLMOBS_EVALUATOR_SAMPLING_RULES": "dd_llmobs_evaluator_sampling_rules", "DD_LOGS_INJECTION": "logs_injection_enabled", "DD_MAX_TRACES_PER_SECOND": "trace_rate_limit", "DD_PROFILING_ENABLED": "profiling_enabled", @@ -43,7 +44,6 @@ "DD_TRACE_SAMPLING_RULES": "trace_sample_rules", "DD_TRACE_SPAN_ATTRIBUTE_SCHEMA": "trace_span_attribute_schema", "DD_TRACE_STARTUP_LOGS": "trace_startup_logs_enabled", - "DD_LLMOBS_EVALUATOR_SAMPLING_RULES": "dd_llmobs_evaluator_sampling_rules", "_dd_trace_writer_log_error_payloads": "trace_writer_log_error_payloads", "agent_url": "trace_agent_url", "appsec.enabled": "appsec_enabled", diff --git a/tests/test_config_consistency.py b/tests/test_config_consistency.py index 47ea9cfd88..fa21a0ec80 100644 --- a/tests/test_config_consistency.py +++ b/tests/test_config_consistency.py @@ -205,7 +205,7 @@ def test_status_code_500(self): client_span = _get_span_by_tags(spans, tags={"span.kind": "client", "http.status_code": "500"}) assert client_span, spans - assert client_span.get("error") == None or client_span.get("error") == 0 + assert client_span.get("error") is None or client_span.get("error") == 0 @scenarios.tracing_config_nondefault @@ -276,7 +276,8 @@ def test_query_string_redaction(self): @features.tracing_configuration_consistency class 
Test_Config_ClientIPHeader_Configured: """Verify headers containing ips are tagged when DD_TRACE_CLIENT_IP_ENABLED=true - and DD_TRACE_CLIENT_IP_HEADER=custom-ip-header""" + and DD_TRACE_CLIENT_IP_HEADER=custom-ip-header + """ def setup_ip_headers_sent_in_one_request(self): self.req = weblog.get( @@ -311,7 +312,8 @@ def test_ip_headers_sent_in_one_request(self): @features.tracing_configuration_consistency class Test_Config_ClientIPHeader_Precedence: """Verify headers containing ips are tagged when DD_TRACE_CLIENT_IP_ENABLED=true - and headers are used to set http.client_ip in order of precedence""" + and headers are used to set http.client_ip in order of precedence + """ # Supported ip headers in order of precedence IP_HEADERS = ( @@ -496,7 +498,6 @@ def test_log_injection_enabled(self): required_fields = ["trace_id", "span_id", "service", "version", "env"] for field in required_fields: assert field in dd, f"Missing field: {field}" - return @rfc("https://docs.google.com/document/d/1kI-gTAKghfcwI7YzKhqRv2ExUstcHqADIWA4-TZ387o/edit#heading=h.8v16cioi7qxp") @@ -638,5 +639,4 @@ def parse_log_injection_message(log_message): except json.JSONDecodeError: continue if message.get("dd") and message.get(log_injection_fields[context.library.library]["message"]) == log_message: - dd = message.get("dd") - return dd + return message.get("dd") diff --git a/tests/test_data_integrity.py b/tests/test_data_integrity.py index a7297f06f4..9efabab6b8 100644 --- a/tests/test_data_integrity.py +++ b/tests/test_data_integrity.py @@ -5,7 +5,7 @@ """Misc checks around data integrity during components' lifetime""" import string -from utils import weblog, interfaces, context, bug, rfc, missing_feature, features +from utils import weblog, interfaces, context, bug, rfc, irrelevant, missing_feature, features from utils.tools import logger from utils.cgroup_info import get_container_id @@ -62,8 +62,8 @@ def validator(data): if header.lower() == "x-datadog-trace-count": try: trace_count = int(value) - except ValueError: - raise ValueError(f"'x-datadog-trace-count' request header is not an integer: {value}") + except ValueError as e: + raise ValueError(f"'x-datadog-trace-count' request header is not an integer: {value}") from e if trace_count != len(data["request"]["content"]): raise ValueError("x-datadog-trace-count request header didn't match the number of traces") @@ -145,7 +145,7 @@ def validator(data): @missing_feature(library="ruby", reason="not implemented yet") @missing_feature(library="php", reason="not implemented yet") @missing_feature(library="cpp", reason="not implemented yet") - @missing_feature(library="golang", reason="not implemented yet") + @irrelevant(library="golang", reason="implemented but not testable") def test_datadog_entity_id(self): """Datadog-Entity-ID header is present and respect the in- format""" @@ -179,21 +179,48 @@ def validator(data): interfaces.library.validate(validator, success_by_default=True) + @missing_feature(library="cpp", reason="not implemented yet") + @missing_feature(library="dotnet", reason="not implemented yet") + @missing_feature(library="java", reason="not implemented yet") + @missing_feature(library="nodejs", reason="not implemented yet") + @missing_feature(library="php", reason="not implemented yet") + @missing_feature(library="ruby", reason="not implemented yet") + @missing_feature(context.library < "golang@1.73.0-dev", reason="Implemented in v1.72.0") def test_datadog_external_env(self): """Datadog-External-Env header if present is in the {prefix}-{value},... 
format""" def validator(data): - for header, value in data["request"]["headers"]: - if header.lower() == "datadog-external-env": - assert value, "Datadog-External-Env header is empty" - items = value.split(",") - for item in items: - assert ( - item[2] == "-" - ), f"Datadog-External-Env item {item} is not using in the format {{prefix}}-{{value}}" + # Only test this when the path ens in /traces + if not data["path"].endswith("/traces"): + return + if _empty_request(data): + # Go sends an empty request content to /traces endpoint. + # This is a non-issue, because there are no traces to which container tags could be attached. + return + request_headers = {h[0].lower(): h[1] for h in data["request"]["headers"]} + if "datadog-external-env" not in request_headers: + raise ValueError(f"Datadog-External-ID header is missing in request {data['log_filename']}") + value = request_headers["datadog-external-env"] + items = value.split(",") + for item in items: + assert ( + item[2] == "-" + ), f"Datadog-External-Env item {item} is not using in the format {{prefix}}-{{value}}" interfaces.library.validate(validator, success_by_default=True) +@features.data_integrity +class Test_Agent: + @missing_feature(library="cpp", reason="Trace are not reported") + def test_headers(self): + """All required headers are present in all requests sent by the agent""" + interfaces.library.assert_response_header( + path_filters=interfaces.library.trace_paths, + header_name_pattern="content-type", + header_value_pattern="application/json", + ) + + def _empty_request(data): return "content" not in data["request"] or not data["request"]["content"] diff --git a/tests/test_distributed.py b/tests/test_distributed.py index ea149d2e14..6c7ef2502b 100644 --- a/tests/test_distributed.py +++ b/tests/test_distributed.py @@ -75,7 +75,7 @@ def test_span_links_from_conflicting_contexts(self): assert link1["attributes"] == {"reason": "terminated_context", "context_headers": "tracecontext"} assert link1["trace_id_high"] == 1229782938247303441 - """Datadog and tracecontext headers, trace-id does match, Datadog is primary + """Datadog and tracecontext headers, trace-id does match, Datadog is primary context we want to make sure there's no span link since they match""" def setup_no_span_links_from_nonconflicting_contexts(self): @@ -101,7 +101,7 @@ def test_no_span_links_from_nonconflicting_contexts(self): assert len(trace) == 0 - """Datadog, b3multi headers edge case where we want to make sure NOT to create a + """Datadog, b3multi headers edge case where we want to make sure NOT to create a span_link if the secondary context has trace_id 0 since that's not valid.""" def setup_no_span_links_from_invalid_trace_id(self): @@ -209,7 +209,7 @@ def test_span_links_omit_tracestate_from_conflicting_contexts(self): links = _retrieve_span_links(span) assert len(links) == 1 link1 = links[0] - assert link1.get("tracestate") == None + assert link1.get("tracestate") is None def _retrieve_span_links(span): diff --git a/tests/test_graphql.py b/tests/test_graphql.py index 6d00150904..bb06ac3b4f 100644 --- a/tests/test_graphql.py +++ b/tests/test_graphql.py @@ -31,7 +31,9 @@ def setup_execute_error_span_event(self): ) def test_execute_error_span_event(self): - """Test if the main GraphQL span contains a span event with the appropriate error information""" + """Test if the main GraphQL span contains a span event with the appropriate error information. + The error extensions allowed are DD_TRACE_GRAPHQL_ERROR_EXTENSIONS=int,float,str,bool,other. 
+ """ assert self.request.status_code == 200 @@ -54,18 +56,35 @@ def test_execute_error_span_event(self): attributes = event["attributes"] - assert type("message") == str - assert type("type") == str - assert type("stacktrace") == str + assert isinstance(attributes["message"], str) + assert isinstance(attributes["type"], str) + assert isinstance(attributes["stacktrace"], str) for path in attributes["path"]: - assert type(path) == str + assert isinstance(path, str) for location in attributes["locations"]: assert len(location.split(":")) == 2 assert location.split(":")[0].isdigit() assert location.split(":")[1].isdigit() + assert attributes["extensions.int"] == 1 + assert attributes["extensions.float"] == 1.1 + assert attributes["extensions.str"] == "1" + assert attributes["extensions.bool"] is True + + # A list with two heterogeneous elements: [1, "foo"]. + # This test simulates an object that is not a supported scalar above (int,float,string,boolean). + # This object should be serialized as a string, either using the language's default serialization or + # JSON serialization of the object. + # The goal here is to display the original as well as possible in the UI, without supporting arbitrary + # nested levels inside `span_event.attributes`. + # use regex to match the list format + assert "1" in attributes["extensions.other"] + assert "foo" in attributes["extensions.other"] + + assert "extensions.not_captured" not in attributes + @staticmethod def _get_events(span): if "events" in span["meta"]: diff --git a/tests/test_identify.py b/tests/test_identify.py index 4b53f47baa..6a522aa808 100644 --- a/tests/test_identify.py +++ b/tests/test_identify.py @@ -75,7 +75,7 @@ def setup_identify_tags_incoming(self): self.r_incoming = weblog.get("/waf", headers=headers) def test_identify_tags_incoming(self): - """with W3C : this test expect to fail with DD_TRACE_PROPAGATION_STYLE_INJECT=W3C""" + """With W3C : this test expect to fail with DD_TRACE_PROPAGATION_STYLE_INJECT=W3C""" tagTable = {"_dd.p.usr.id": "dXNyLmlk"} interfaces.library.validate_spans(self.r_incoming, validate_identify_tags(tagTable)) @@ -100,11 +100,11 @@ def setup_identify_tags_incoming(self): self.r_incoming = weblog.get("/waf", headers=headers) def test_identify_tags_incoming(self): - """with W3C : this test expect to fail with DD_TRACE_PROPAGATION_STYLE_INJECT=W3C""" + """With W3C : this test expect to fail with DD_TRACE_PROPAGATION_STYLE_INJECT=W3C""" def usr_id_not_present(span): if "usr.id" in span["meta"]: - raise Exception(f"usr.id must not be present in this span") + raise Exception("usr.id must not be present in this span") return True tagTable = {"_dd.p.usr.id": "dXNyLmlk"} diff --git a/tests/test_library_conf.py b/tests/test_library_conf.py index e2d112bf3e..24e285eee2 100644 --- a/tests/test_library_conf.py +++ b/tests/test_library_conf.py @@ -3,10 +3,31 @@ # Copyright 2021 Datadog, Inc. 
from utils import weblog, interfaces, scenarios, features, missing_feature -from utils._context.header_tag_vars import * +from utils._context.header_tag_vars import ( + CONFIG_COLON_LEADING, + CONFIG_COLON_TRAILING, + HEADER_NAME_COLON_LEADING, + HEADER_NAME_COLON_TRAILING, + HEADER_NAME_LONG, + HEADER_NAME_SHORT, + HEADER_NAME_WHITESPACE_HEADER, + HEADER_NAME_WHITESPACE_TAG, + HEADER_NAME_WHITESPACE_VAL_LONG, + HEADER_NAME_WHITESPACE_VAL_SHORT, + HEADER_VAL_BASIC, + HEADER_VAL_WHITESPACE_VAL_LONG, + HEADER_VAL_WHITESPACE_VAL_SHORT, + TAG_COLON_LEADING, + TAG_COLON_TRAILING, + TAG_LONG, + TAG_SHORT, + TAG_WHITESPACE_HEADER, + TAG_WHITESPACE_TAG, + TAG_WHITESPACE_VAL_LONG, + TAG_WHITESPACE_VAL_SHORT, +) from utils import remote_config as rc import json -import pprint # basic / legacy tests, just tests user-agent can be received as a tag @@ -60,7 +81,8 @@ def test_trace_header_tags(self): @features.http_headers_as_tags_dd_trace_header_tags class Test_HeaderTags_Whitespace_Header: """Validates that leading/trailing whitespaces are trimmed on the header values given to DD_TRACE_HEADER_TAGS - e.g, ' header ' in DD_TRACE_HEADER_TAGS=' header ' becomes 'header' and is expected to match req.header of 'header'""" + e.g., ' header ' in DD_TRACE_HEADER_TAGS=' header ' becomes 'header' and is expected to match req.header of 'header' + """ def setup_trace_header_tags(self): self.headers = {HEADER_NAME_WHITESPACE_HEADER: HEADER_VAL_BASIC} @@ -78,7 +100,8 @@ def test_trace_header_tags(self): @features.http_headers_as_tags_dd_trace_header_tags class Test_HeaderTags_Whitespace_Tag: """Validates that leading/trailing whitespaces on the Input to DD_TRACE_HEADER_TAGS are - trimmed on mapping parts, but whitespaces in between non-whitespace chars are left in-tact.""" + trimmed on mapping parts, but whitespaces in between non-whitespace chars are left intact.
+ """ def setup_trace_header_tags(self): self.headers = {HEADER_NAME_WHITESPACE_TAG: HEADER_VAL_BASIC} @@ -96,7 +119,8 @@ def test_trace_header_tags(self): @features.http_headers_as_tags_dd_trace_header_tags class Test_HeaderTags_Whitespace_Val_Short: """Validates that between-char whitespaces in header values are not removed, - but leading/trailing whitespace is stripped, using short form input""" + but leading/trailing whitespace is stripped, using short form input + """ def setup_trace_header_tags(self): self.headers = {HEADER_NAME_WHITESPACE_VAL_SHORT: HEADER_VAL_WHITESPACE_VAL_SHORT} @@ -114,7 +138,8 @@ def test_trace_header_tags(self): @features.http_headers_as_tags_dd_trace_header_tags class Test_HeaderTags_Whitespace_Val_Long: """Validates that between-char whitespaces in header values are not removed, - but leading/trailing whitespace is stripped, using long form input""" + but leading/trailing whitespace is stripped, using long form input + """ def setup_trace_header_tags(self): self.headers = {HEADER_NAME_WHITESPACE_VAL_LONG: HEADER_VAL_WHITESPACE_VAL_LONG} @@ -249,8 +274,8 @@ def get_rc_params(self, header_tags): "service_target": {"service": "weblog", "env": "system-tests"}, "lib_config": header_tags, } - id = hash(json.dumps(config)) - return f"datadog/2/APM_TRACING/{id}/config", config + rc_id = hash(json.dumps(config)) + return f"datadog/2/APM_TRACING/{rc_id}/config", config # The Datadog specific tracecontext flags to mark flags are set diff --git a/tests/test_profiling.py b/tests/test_profiling.py index d52822faa7..e37e3032b6 100644 --- a/tests/test_profiling.py +++ b/tests/test_profiling.py @@ -17,9 +17,11 @@ class Test_Profile: """Basic testing of profiling""" + _is_set_up = False # used to do the setup only once + @staticmethod def _common_setup(): - if hasattr(Test_Profile, "_is_set_up"): + if Test_Profile._is_set_up: return Test_Profile._is_set_up = True diff --git a/tests/test_remote_config.py b/tests/test_remote_config.py deleted file mode 100644 index 535a2773b0..0000000000 --- a/tests/test_remote_config.py +++ /dev/null @@ -1,24 +0,0 @@ -# Unless explicitly stated otherwise all files in this repository are licensed under the the Apache License Version 2.0. -# This product includes software developed at Datadog (https://www.datadoghq.com/). -# Copyright 2021 Datadog, Inc. 
- -from utils import rfc, interfaces, features - - -@rfc("https://docs.google.com/document/d/1bUVtEpXNTkIGvLxzkNYCxQzP2X9EK9HMBLHWXr_5KLM/edit#heading=h.vy1jegxy7cuc") -@features.remote_config_object_supported -class Test_NoError: - """A library should apply with no error all remote config payload.""" - - def test_no_error(self): - def no_error(data): - config_states = ( - data.get("request", {}).get("content", {}).get("client", {}).get("state", {}).get("config_states", {}) - ) - - for state in config_states: - error = state.get("apply_error", None) - if error is not None: - raise Exception(f"Error in remote config application: {error}") - - interfaces.library.validate_remote_configuration(no_error, success_by_default=True) diff --git a/tests/test_sampling_rates.py b/tests/test_sampling_rates.py index bb7628fef7..005db5d018 100644 --- a/tests/test_sampling_rates.py +++ b/tests/test_sampling_rates.py @@ -6,7 +6,7 @@ import csv from random import randint, seed -from utils import weblog, interfaces, context, missing_feature, bug, irrelevant, flaky, scenarios, features +from utils import weblog, interfaces, context, bug, irrelevant, flaky, scenarios, features from utils.tools import logger @@ -251,7 +251,7 @@ def validator(data): if len(decisions) < 2: continue - if not all((d == decisions[0] for d in decisions)): + if not all(d == decisions[0] for d in decisions): raise ValueError(f"Sampling decisions are not deterministic for trace_id {trace_id}: {decisions}") def _load_csv_sampling_decisions(self): diff --git a/tests/test_scrubbing.py b/tests/test_scrubbing.py index 5699e5233b..122dd26782 100644 --- a/tests/test_scrubbing.py +++ b/tests/test_scrubbing.py @@ -78,7 +78,7 @@ def setup_main(self): context.weblog_variant in ("vertx3", "vertx4", "jersey-grizzly2", "akka-http"), reason="Need weblog endpoint" ) def test_main(self): - """check that not data is leaked""" + """Check that no data is leaked""" assert self.r.status_code == 200 def validate_report(trace): diff --git a/tests/test_semantic_conventions.py b/tests/test_semantic_conventions.py index f296bb5936..19b6c40bbe 100644 --- a/tests/test_semantic_conventions.py +++ b/tests/test_semantic_conventions.py @@ -150,10 +150,10 @@ def test_meta_span_kind(self): def validator(span): if span.get("parent_id") not in (0, None): # do nothing if not root span - return + return None if span.get("type") != "web": # do nothing if is not web related - return + return None assert "span.kind" in span["meta"], "Web span expects a span.kind meta tag" assert span["meta"]["span.kind"] in ["server", "client"], "Meta tag span.kind should be client or server" @@ -170,10 +170,10 @@ def test_meta_http_url(self): def validator(span): if span.get("parent_id") not in (0, None): # do nothing if not root span - return + return None if span.get("type") != "web": # do nothing if is not web related - return + return None assert "http.url" in span["meta"], "web span expect an http.url meta tag" @@ -189,10 +189,10 @@ def test_meta_http_status_code(self): def validator(span): if span.get("parent_id") not in (0, None): # do nothing if not root span - return + return None if span.get("type") != "web": # do nothing if is not web related - return + return None assert "http.status_code" in span["meta"], "web span expect an http.status_code meta tag" @@ -207,10 +207,10 @@ def test_meta_http_method(self): def validator(span): if span.get("parent_id") not in (0, None): # do nothing if not root span - return + return None if span.get("type") != "web": # do nothing if is not web
related - return + return None assert "http.method" in span["meta"], "web span expect an http.method meta tag" diff --git a/tests/test_span_events.py b/tests/test_span_events.py index b7ed80217f..3e5d9138c2 100644 --- a/tests/test_span_events.py +++ b/tests/test_span_events.py @@ -2,14 +2,13 @@ # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. -from utils import context, interfaces, irrelevant, weblog, scenarios, features, rfc, missing_feature +from utils import context, interfaces, irrelevant, weblog, scenarios, features, rfc, bug @rfc("https://docs.google.com/document/d/1cVod_VI7Yruq8U9dfMRFJd7npDu-uBpste2IB04GyaQ") @features.span_events class Test_SpanEvents_WithAgentSupport: - """ - Test that tracers send natively serialized span events if the agent support and serialization format support it. + """Test that tracers send natively serialized span events if the agent and the serialization format support it. Tracers only need to test for their default serialization format (v0.4, v0.5, v0.7). Request the Weblog endpoint `/add_event`, which adds a span event (with any name and attributes values) @@ -19,7 +18,7 @@ class Test_SpanEvents_WithAgentSupport: def setup_v04_v07_default_format(self): self.r = weblog.get("/add_event") - @missing_feature(context.library in ["ruby"], reason="Native serialization not implemented") + @bug(library="ruby", reason="APMAPI-1141") def test_v04_v07_default_format(self): """For traces that default to the v0.4 or v0.7 format, send events as a top-level `span_events` field""" interfaces.library.assert_trace_exists(self.r) @@ -35,8 +34,7 @@ def setup_v05_default_format(self): @irrelevant(context.library in ["ruby"], reason="v0.5 is not the default format") def test_v05_default_format(self): - """ - For traces that default to the v0.5 format, send events as the span tag `events` + """For traces that default to the v0.5 format, send events as the span tag `events` given this format does not support native serialization. """ interfaces.library.assert_trace_exists(self.r) @@ -59,8 +57,7 @@ def _get_span(self, request): @features.span_events @scenarios.agent_not_supporting_span_events class Test_SpanEvents_WithoutAgentSupport: - """ - Test that tracers do not attempt to send natively serialized span events if the agent does not support it. + """Test that tracers do not attempt to send natively serialized span events if the agent does not support it. Request the Weblog endpoint `/add_event`, which adds a span event (with any name and attributes values) to the request root span.
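The two span-events scenarios above hinge only on where the events land in the payload. A minimal sketch of that distinction (a hypothetical helper, assuming the intercepted span dicts used throughout this suite; not part of the patch):

import json

def get_span_events(span: dict) -> list:
    """Read span events from either serialization shape."""
    # v0.4/v0.7 native serialization: a top-level `span_events` field
    if "span_events" in span:
        return span["span_events"]
    # v0.5 fallback: events JSON-serialized into the `events` meta tag
    if "events" in span.get("meta", {}):
        return json.loads(span["meta"]["events"])
    return []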
diff --git a/tests/test_standard_tags.py b/tests/test_standard_tags.py index 6b286a72b5..22bbcb7351 100644 --- a/tests/test_standard_tags.py +++ b/tests/test_standard_tags.py @@ -270,8 +270,8 @@ def _setup_without_attack(self): for header, value in (self.FORWARD_HEADERS | self.FORWARD_HEADERS_VENDOR).items(): self.requests_without_attack[header] = weblog.get("/waf/", headers={header: value}) - def _test_client_ip(self, forward_headers): - for header, _ in forward_headers.items(): + def _test_client_ip(self, forward_headers: dict[str, str]): + for header in forward_headers: request = self.requests_without_attack[header] meta = self._get_root_span_meta(request) assert "http.client_ip" in meta, f"Missing http.client_ip for {header}" diff --git a/tests/test_telemetry.py b/tests/test_telemetry.py index 05831379ff..3c034ba56c 100644 --- a/tests/test_telemetry.py +++ b/tests/test_telemetry.py @@ -138,7 +138,7 @@ def test_seq_id(self): if len(telemetry_data) == 0: raise ValueError("No telemetry data to validate on") - runtime_ids = set((data["request"]["content"]["runtime_id"] for data in telemetry_data)) + runtime_ids = set(data["request"]["content"]["runtime_id"] for data in telemetry_data) for runtime_id in runtime_ids: logger.debug(f"Validating telemetry messages for runtime_id {runtime_id}") max_seq_id = 0 @@ -162,14 +162,13 @@ def test_seq_id(self): if seq_id > max_seq_id: max_seq_id = seq_id received_max_time = curr_message_time - else: - if received_max_time is not None and (curr_message_time - received_max_time) > timedelta( - seconds=MAX_OUT_OF_ORDER_LAG - ): - raise ValueError( - f"Received message with seq_id {seq_id} to far more than" - f"100ms after message with seq_id {max_seq_id}" - ) + elif received_max_time is not None and (curr_message_time - received_max_time) > timedelta( + seconds=MAX_OUT_OF_ORDER_LAG + ): + raise ValueError( + f"Received message with seq_id {seq_id} more than " + f"100ms after message with seq_id {max_seq_id}" + ) # sort by seq_id, seq_ids is an array of (id, filename), so the key is the first element seq_ids.sort(key=lambda item: item[0]) @@ -205,7 +204,7 @@ def test_app_started_sent_exactly_once(self): if data["response"]["status_code"] == 202: count_by_runtime_id[runtime_id] += 1 - assert all((count == 1 for count in count_by_runtime_id.values())) + assert all(count == 1 for count in count_by_runtime_id.values()) @missing_feature(context.library < "ruby@1.22.0", reason="app-started not sent") @bug(context.library >= "dotnet@3.4.0", reason="APMAPI-728") @@ -222,7 +221,7 @@ def test_app_started_is_first_message(self): else: # In theory, app-started must have seq_id 1, but tracers may skip seq_ids if sending messages fail. # So we will check that app-started is the first message by seq_id, rather than strictly seq_id 1.
- telemetry_data = list(sorted(telemetry_data, key=lambda x: x["request"]["content"]["seq_id"])) + telemetry_data = sorted(telemetry_data, key=lambda x: x["request"]["content"]["seq_id"]) app_started = [d for d in telemetry_data if d["request"]["content"].get("request_type") == "app-started"] assert app_started, "app-started message not found" min_seq_id = min(d["request"]["content"]["seq_id"] for d in telemetry_data) @@ -282,15 +281,14 @@ def save_data(data, container): ) if len(self.library_requests) != 0: - for s, r in self.library_requests.keys(): + for s, r in self.library_requests: logger.error(f"seq_id: {s}, runtime_id: {r}") raise Exception("The following telemetry messages were not forwarded by the agent") @staticmethod def _get_heartbeat_delays_by_runtime() -> dict: - """ - Returns a dict where : + """Returns a dict where: The key is the runtime id The value is a list of delay observed on this runtime id """ @@ -337,8 +335,7 @@ def _get_heartbeat_delays_by_runtime() -> dict: @bug(context.library > "php@1.5.1", reason="APMAPI-971") @features.telemetry_heart_beat_collected def test_app_heartbeats_delays(self): - """ - Check for telemetry heartbeat are not sent too fast/slow, regarding DD_TELEMETRY_HEARTBEAT_INTERVAL + """Check that telemetry heartbeats are not sent too fast/slow, regarding DD_TELEMETRY_HEARTBEAT_INTERVAL There are a lot of reasons for individual heartbeats to be sent too slow/fast, and the subsequent ones to be sent too fast/slow so the RFC says that it must not drift. So we will check the average delay """ @@ -366,12 +363,12 @@ def setup_app_dependencies_loaded(self): @irrelevant( library="java", reason=""" - A Java application can be redeployed to the same server for many times (for the same JVM process). + A Java application can be redeployed to the same server many times (for the same JVM process). That means, every new deployment/reload of application will cause reloading classes/dependencies and as a result we will see duplications.
""", ) def test_app_dependencies_loaded(self): - """test app-dependencies-loaded requests""" + """Test app-dependencies-loaded requests""" test_loaded_dependencies = { "dotnet": {"NodaTime": False}, @@ -544,11 +541,11 @@ def test_app_product_change(self): dynamic_instrumentation_enabled = product["dynamic_instrumentation"]["enabled"] assert ( appsec_enabled is True - ), f"Product appsec Product profiler enabled was expected to be True, found False" - assert profiler_enabled is True, f"Product profiler enabled was expected to be True, found False" + ), "Product appsec Product profiler enabled was expected to be True, found False" + assert profiler_enabled is True, "Product profiler enabled was expected to be True, found False" assert ( dynamic_instrumentation_enabled is False - ), f"Product dynamic_instrumentation enabled was expected to be False, found True" + ), "Product dynamic_instrumentation enabled was expected to be False, found True" if app_product_change_event_found is False: raise Exception("app-product-change is not emitted when product change is enabled") @@ -597,8 +594,7 @@ def test_app_started_product_info(self): @bug(library="java", reason="APMAPI-969") def test_config_telemetry_completeness(self): - """ - Assert that config telemetry is handled properly by telemetry intake + """Assert that config telemetry is handled properly by telemetry intake Runbook: https://github.com/DataDog/system-tests/blob/main/docs/edit/runbook.md#test_config_telemetry_completeness """ diff --git a/tests/test_the_test/test_conventions.py b/tests/test_the_test/test_conventions.py index 52f4ab0558..2c53a77c5d 100644 --- a/tests/test_the_test/test_conventions.py +++ b/tests/test_the_test/test_conventions.py @@ -6,7 +6,7 @@ def test_utils(): # verify that all files in test folder are either a test file, a utils.py file or a conftest.py file for folder, _, files in os.walk("tests"): - if folder.startswith("tests/fuzzer") or folder.startswith("tests/perfs"): + if folder.startswith(("tests/fuzzer", "tests/perfs")): # do not check these folders, they are particular use cases continue diff --git a/tests/test_the_test/test_decorators.py b/tests/test_the_test/test_decorators.py index f61bac0fc1..8244faeb68 100644 --- a/tests/test_the_test/test_decorators.py +++ b/tests/test_the_test/test_decorators.py @@ -25,7 +25,7 @@ def is_skipped(item, reason): print(f"Found expected {mark} for {item}") return True - print(f"{item} is skipped, but reason is {repr(mark.kwargs['reason'])} io {repr(reason)}") + print(f"{item} is skipped, but reason is {mark.kwargs['reason']!r} io {reason!r}") raise Exception(f"{item} is not skipped, or not with the good reason") diff --git a/tests/test_the_test/test_docker_scenario.py b/tests/test_the_test/test_docker_scenario.py index 0533344ffc..0dc22e26f9 100644 --- a/tests/test_the_test/test_docker_scenario.py +++ b/tests/test_the_test/test_docker_scenario.py @@ -68,7 +68,7 @@ def __init__(self) -> None: @scenarios.test_the_test def test_recursive_2(): - """more complex""" + """More complex""" class FakeScenario(DockerScenario): def __init__(self) -> None: diff --git a/tests/test_the_test/test_features.py b/tests/test_the_test/test_features.py index 0bb94d0ae3..a307b0eb2f 100644 --- a/tests/test_the_test/test_features.py +++ b/tests/test_the_test/test_features.py @@ -26,7 +26,6 @@ def test_not_reported(): @scenarios.test_the_test def test_all_class_has_feature_decorator(session, deselected_items): - allow_no_feature_nodes = session.config.inicfg["allow_no_feature_nodes"] processed_nodes 
= set() shouldfail = False @@ -38,14 +37,8 @@ def test_all_class_has_feature_decorator(session, deselected_items): processed_nodes.add(reported_node_id) - allow_missing_declaration = False - - for node in allow_no_feature_nodes: - if item.nodeid.startswith(node): - allow_missing_declaration = True - break - - if allow_missing_declaration: + if item.nodeid.startswith("tests/test_the_test/"): + # special case: the test_the_test folder is exempt continue declared_features = [marker.kwargs["feature_id"] for marker in item.iter_markers("features")] diff --git a/tests/test_the_test/test_group_rules.py b/tests/test_the_test/test_group_rules.py new file mode 100644 index 0000000000..553ac5bec6 --- /dev/null +++ b/tests/test_the_test/test_group_rules.py @@ -0,0 +1,66 @@ +# right now, no rules are defined for groups +# it means that we cannot, by design, make some group part of another group +# waiting for a clean solution to this problem, let's just test it + +from utils import scenarios +from utils._context._scenarios import ScenarioGroup, get_all_scenarios + + +@scenarios.test_the_test +def test_appsec(): + for scenario in get_all_scenarios(): + if ScenarioGroup.APPSEC_RASP in scenario.scenario_groups: + assert ScenarioGroup.APPSEC in scenario.scenario_groups + + +@scenarios.test_the_test +def test_tracer_release(): + # make an exclusion list + + not_in_tracer_release_group = [ + # list of scenarios that will never be part of the tracer release + scenarios.everything_disabled, + scenarios.fuzzer, + scenarios.mock_the_test, + scenarios.test_the_test, + scenarios.todo, + # Is it targeting tracers? + scenarios.otel_integrations, + scenarios.otel_log_e2e, + scenarios.otel_metric_e2e, + scenarios.otel_tracing_e2e, + # to be added once stability is proven + scenarios.chaos_installer_auto_injection, + scenarios.container_auto_injection_install_script_profiling, + scenarios.container_auto_injection_install_script, + scenarios.docker_ssi, + scenarios.external_processing_blocking, # need to declare a list of allowed libraries in get-workflow-parameters + scenarios.external_processing, # need to declare a list of allowed libraries in get-workflow-parameters + scenarios.host_auto_injection_install_script_profiling, + scenarios.host_auto_injection_install_script, + scenarios.installer_auto_injection, + scenarios.installer_not_supported_auto_injection, + scenarios.k8s_lib_injection_no_ac_uds, + scenarios.k8s_lib_injection_no_ac, + scenarios.k8s_lib_injection_operator, + scenarios.k8s_lib_injection_profiling_disabled, + scenarios.k8s_lib_injection_profiling_enabled, + scenarios.k8s_lib_injection_profiling_override, + scenarios.k8s_lib_injection_spark_djm, + scenarios.k8s_lib_injection_uds, + scenarios.k8s_lib_injection, + scenarios.lib_injection_validation_unsupported_lang, + scenarios.lib_injection_validation, + scenarios.local_auto_injection_install_script, + scenarios.simple_auto_injection_profiling, + scenarios.simple_installer_auto_injection, + ] + + for scenario in get_all_scenarios(): + if ScenarioGroup.TRACER_RELEASE not in scenario.scenario_groups: + assert ( + scenario in not_in_tracer_release_group + ), f"Scenario {scenario} is not part of {ScenarioGroup.TRACER_RELEASE}" + + if scenario in not_in_tracer_release_group: + assert ScenarioGroup.TRACER_RELEASE not in scenario.scenario_groups diff --git a/tests/test_the_test/test_json_report.py b/tests/test_the_test/test_json_report.py index acd0aa93d8..bae54407bc 100644 --- a/tests/test_the_test/test_json_report.py +++ b/tests/test_the_test/test_json_report.py @@ -24,7 +24,7
@@ def setup_class(cls): cls.report = json.load(f) with open("logs_mock_the_test/tests.log", encoding="utf-8") as f: - cls.logs = [line.split(" ", 1)[1] for line in f.readlines()] + cls.logs = [line.split(" ", 1)[1] for line in f] def get_test_fp(self, nodeid): for test in self.report["tests"]: @@ -60,7 +60,7 @@ def test_clean_test_data(self): assert len(test) == 6, list(test.keys()) # testDeclaration, details, features, outcome, lineNumber and path def test_context_serialization(self): - """check context serialization node is generating""" + """Check that the context serialization node is generated""" # Check library node (version is set on TestTheTest scenario) assert self.report["language"] == "java", list(self.report) @@ -81,7 +81,7 @@ def test_feature_id(self): assert test["features"] == [75, 13, 74, 666] def test_skip_reason(self): - """the skip reason must be the closest to the test method""" + """The skip reason must be the closest to the test method""" test = self.get_test_fp("Test_Mock2::test_skipped") assert test["testDeclaration"] == "bug" assert test["details"] == "bug (FAKE-002)" @@ -141,7 +141,7 @@ def test_logs(self): class Test_Mock: def test_mock(self): """Mock test doc""" - assert 1 == 1 + assert 1 == 1 # noqa: PLR0133 @missing_feature(True, reason="not yet done") @features.app_client_configuration_change_event diff --git a/tests/test_the_test/test_scenario_names.py b/tests/test_the_test/test_scenario_names.py new file mode 100644 index 0000000000..8f2de744f5 --- /dev/null +++ b/tests/test_the_test/test_scenario_names.py @@ -0,0 +1,19 @@ +import re +from utils import scenarios +from utils._context._scenarios import get_all_scenarios + + +@scenarios.test_the_test +def test_scenario_names(): + for scenario in get_all_scenarios(): + name = scenario.name + assert re.fullmatch( + r"^[A-Z][A-Z\d_]+$", name + ), f"'{name}' is not a valid name for a scenario, it should contain only capital letters" + + expected_property = name.lower() + + assert hasattr(scenarios, expected_property), f"Scenarios object should have the {expected_property} property" + assert ( + getattr(scenarios, expected_property) is scenario + ), f"scenarios.{expected_property} should be the {scenario} object" diff --git a/tests/test_the_test/test_scrubber.py b/tests/test_the_test/test_scrubber.py index 29da1a3260..beb38bd30b 100644 --- a/tests/test_the_test/test_scrubber.py +++ b/tests/test_the_test/test_scrubber.py @@ -36,7 +36,7 @@ @scenarios.test_the_test def test_log_scrubber(): cmd = ["./run.sh", "MOCK_THE_TEST", FILENAME] - subprocess.run(cmd, env=scrubbed_names | os.environ, text=True, capture_output=True) + subprocess.run(cmd, env=scrubbed_names | os.environ, text=True, capture_output=True, check=False) redacted_count = 0 @@ -47,11 +47,11 @@ def test_log_scrubber(): with open(file_path, "r", encoding="utf-8") as f: data = f.read() - redacted_count += data.count("") + redacted_count += data.count("--redacted--") for secret in scrubbed_names.values(): assert secret not in data, f"{secret} found in {file_path}" - # extra portection to make sure we redacted all secrets + # extra protection to make sure we redacted all secrets assert redacted_count != 0, "No secrets were redacted" diff --git a/tests/test_the_test/test_version.py b/tests/test_the_test/test_version.py index 1e7b7e2571..7400517c8d 100644 --- a/tests/test_the_test/test_version.py +++ b/tests/test_the_test/test_version.py @@ -15,19 +15,19 @@ def test_version_comparizon(): assert v <= "1.1" assert v <= "1.0" - assert "1.1" >= v - assert "1.0" >= v + assert v
<= "1.1" + assert v <= "1.0" assert v < "1.1" - assert "1.1" > v + assert v < "1.1" assert v >= "0.9" assert v >= "1.0" - assert "0.9" <= v - assert "1.0" <= v + assert v >= "0.9" + assert v >= "1.0" assert v > "0.9" - assert "0.9" < v + assert v > "0.9" assert str(Version("v1.3.1")) == "1.3.1" @@ -58,7 +58,7 @@ def test_ruby_version(): def test_library_version_comparizon(): assert LibraryVersion("x", "1.31.1") < "x@1.34.1" - assert "x@1.31.1" < LibraryVersion("x", "v1.34.1") + assert LibraryVersion("x", "v1.34.1") > "x@1.31.1" assert LibraryVersion("x", "1.31.1") < LibraryVersion("x", "v1.34.1") assert LibraryVersion("python", "1.1.0rc2.dev15+gc41d325d") >= "python@1.1.0rc2.dev" @@ -136,30 +136,30 @@ def test_library_version(): assert v <= "p@1.1" assert v <= "p@1.0" - assert "p@1.1" >= v - assert "p@1.0" >= v + assert v <= "p@1.1" + assert v <= "p@1.0" assert v < "p@1.1" - assert "p@1.1" > v + assert v < "p@1.1" assert v >= "p@0.9" assert v >= "p@1.0" - assert "p@0.9" <= v - assert "p@1.0" <= v + assert v >= "p@0.9" + assert v >= "p@1.0" assert v > "p@0.9" - assert "p@0.9" < v + assert v > "p@0.9" assert (v <= "u@1.0") is False assert (v >= "u@1.0") is False - assert ("u@1.0" <= v) is False - assert ("u@1.0" >= v) is False + assert (v >= "u@1.0") is False + assert (v <= "u@1.0") is False v = LibraryVersion("p") - assert ("u@1.0" == v) is False - assert ("u@1.0" <= v) is False + assert (v == "u@1.0") is False + assert (v >= "u@1.0") is False v = LibraryVersion("python", "0.53.0.dev70+g494e6dc0") assert v == "python@0.53.0.dev70+g494e6dc0" diff --git a/utils/_context/_scenarios/__init__.py b/utils/_context/_scenarios/__init__.py index e1a86243b8..71655eeb93 100644 --- a/utils/_context/_scenarios/__init__.py +++ b/utils/_context/_scenarios/__init__.py @@ -64,6 +64,15 @@ class _Scenarios: profiling = ProfilingScenario("PROFILING") + appsec_no_stats = EndToEndScenario( + name="APPSEC_NO_STATS", + doc=( + "End to end testing with default values. Default scenario has DD_TRACE_COMPUTE_STATS=true." 
+ "This scenario let that env to use its default" + ), + scenario_groups=[ScenarioGroup.APPSEC], + ) + sampling = EndToEndScenario( "SAMPLING", tracer_sampling_rate=0.5, @@ -153,7 +162,10 @@ class _Scenarios: # This GraphQL scenario can be used for any GraphQL testing, not just AppSec graphql_appsec = EndToEndScenario( "GRAPHQL_APPSEC", - weblog_env={"DD_APPSEC_RULES": "/appsec_blocking_rule.json"}, + weblog_env={ + "DD_APPSEC_RULES": "/appsec_blocking_rule.json", + "DD_TRACE_GRAPHQL_ERROR_EXTENSIONS": "int,float,str,bool,other", + }, weblog_volumes={"./tests/appsec/blocking_rule.json": {"bind": "/appsec_blocking_rule.json", "mode": "ro"}}, doc="AppSec tests for GraphQL integrations", github_workflow="graphql", @@ -650,7 +662,7 @@ class _Scenarios: scenario_groups=[ScenarioGroup.DEBUGGER], ) - fuzzer = DockerScenario("_FUZZER", doc="Fake scenario for fuzzing (launch without pytest)", github_workflow=None) + fuzzer = DockerScenario("FUZZER", doc="Fake scenario for fuzzing (launch without pytest)", github_workflow=None) # Single Step Instrumentation scenarios (HOST and CONTAINER) @@ -820,6 +832,20 @@ class _Scenarios: scenario_groups=[ScenarioGroup.APPSEC, ScenarioGroup.APPSEC_RASP], ) + appsec_rasp_non_blocking = EndToEndScenario( + "APPSEC_RASP_NON_BLOCKING", + weblog_env={"DD_APPSEC_RASP_ENABLED": "true", "DD_APPSEC_RULES": "/appsec_rasp_non_blocking_ruleset.json"}, + weblog_volumes={ + "./tests/appsec/rasp/rasp_non_blocking_ruleset.json": { + "bind": "/appsec_rasp_non_blocking_ruleset.json", + "mode": "ro", + } + }, + doc="Enable APPSEC RASP", + github_workflow="endtoend", + scenario_groups=[ScenarioGroup.APPSEC], + ) + agent_not_supporting_span_events = EndToEndScenario( "AGENT_NOT_SUPPORTING_SPAN_EVENTS", span_events=False, diff --git a/utils/_context/_scenarios/default.py b/utils/_context/_scenarios/default.py index 0bb365d0ef..bfb62df503 100644 --- a/utils/_context/_scenarios/default.py +++ b/utils/_context/_scenarios/default.py @@ -36,6 +36,7 @@ def __init__(self, name: str): "DD_TRACE_FEATURES": "discovery", "DD_TRACE_COMPUTE_STATS": "true", "SOME_SECRET_ENV": "leaked-env-var", # used for test that env var are not leaked + "DD_EXTERNAL_ENV": "it-false,cn-weblog,pu-75a2b6d5-3949-4afb-ad0d-92ff0674e759", }, agent_env={"SOME_SECRET_ENV": "leaked-env-var"}, include_postgres_db=True, diff --git a/utils/_context/_scenarios/docker_ssi.py b/utils/_context/_scenarios/docker_ssi.py index c666fd6943..a88be2406b 100644 --- a/utils/_context/_scenarios/docker_ssi.py +++ b/utils/_context/_scenarios/docker_ssi.py @@ -8,7 +8,7 @@ from docker.errors import BuildError from docker.models.networks import Network -from utils import context, interfaces +from utils import interfaces from utils._context.library_version import LibraryVersion, Version from utils._context.containers import ( create_network, @@ -90,6 +90,8 @@ def configure(self, config): # 3.1 Install the ssi to run the auto instrumentation (allway build using the ssi installer image buit in the step 2) # 3.2 Build the weblog image using the ssi image built in the step 3.1 self.ssi_image_builder = DockerSSIImageBuilder( + self.name, + self.host_log_folder, self._base_weblog, self._base_image, self._library, @@ -234,6 +236,8 @@ class DockerSSIImageBuilder: def __init__( self, + scenario_name, + host_log_folder, base_weblog, base_image, library, @@ -245,6 +249,8 @@ def __init__( custom_library_version, custom_injector_version, ) -> None: + self.scenario_name = scenario_name + self.host_log_folder = host_log_folder self._base_weblog = base_weblog 
self._base_image = base_image self._library = library @@ -442,23 +448,33 @@ def tested_components(self): def print_docker_build_logs(self, image_tag, build_logs): """Print the docker build logs to docker_build.log file""" - scenario_name = context.scenario.name - vm_logger(scenario_name, "docker_build").info("***************************************************************") - vm_logger(scenario_name, "docker_build").info(f" Building docker image with tag: {image_tag} ") - vm_logger(scenario_name, "docker_build").info("***************************************************************") + vm_logger(self.scenario_name, "docker_build", log_folder=self.host_log_folder).info( + "***************************************************************" + ) + vm_logger(self.scenario_name, "docker_build", log_folder=self.host_log_folder).info( + f" Building docker image with tag: {image_tag} " + ) + vm_logger(self.scenario_name, "docker_build", log_folder=self.host_log_folder).info( + "***************************************************************" + ) for chunk in build_logs: if "stream" in chunk: for line in chunk["stream"].splitlines(): - vm_logger(scenario_name, "docker_build").info(line) + vm_logger(self.scenario_name, "docker_build", log_folder=self.host_log_folder).info(line) def print_docker_push_logs(self, image_tag, push_logs): """Print the docker push logs to docker_push.log file""" - scenario_name = context.scenario.name - vm_logger(scenario_name, "docker_push").info("***************************************************************") - vm_logger(scenario_name, "docker_push").info(f" Push docker image with tag: {image_tag} ") - vm_logger(scenario_name, "docker_push").info("***************************************************************") - vm_logger(scenario_name, "docker_push").info(push_logs) + vm_logger(self.scenario_name, "docker_push", log_folder=self.host_log_folder).info( + "***************************************************************" + ) + vm_logger(self.scenario_name, "docker_push", log_folder=self.host_log_folder).info( + f" Push docker image with tag: {image_tag} " + ) + vm_logger(self.scenario_name, "docker_push", log_folder=self.host_log_folder).info( + "***************************************************************" + ) + vm_logger(self.scenario_name, "docker_push", log_folder=self.host_log_folder).info(push_logs) def _get_free_port(): diff --git a/utils/_context/_scenarios/endtoend.py b/utils/_context/_scenarios/endtoend.py index 067f322167..64ee8033ed 100644 --- a/utils/_context/_scenarios/endtoend.py +++ b/utils/_context/_scenarios/endtoend.py @@ -27,6 +27,8 @@ MsSqlServerContainer, BuddyContainer, TestedContainer, + LocalstackContainer, + ElasticMQContainer, _get_client as get_docker_client, ) @@ -67,6 +69,8 @@ def __init__( include_rabbitmq=False, include_mysql_db=False, include_sqlserver=False, + include_localstack=False, + include_elasticmq=False, ) -> None: super().__init__(name, doc=doc, github_workflow=github_workflow, scenario_groups=scenario_groups) @@ -114,6 +118,12 @@ def __init__( if include_sqlserver: self._supporting_containers.append(MsSqlServerContainer(host_log_folder=self.host_log_folder)) + if include_localstack: + self._supporting_containers.append(LocalstackContainer(host_log_folder=self.host_log_folder)) + + if include_elasticmq: + self._supporting_containers.append(ElasticMQContainer(host_log_folder=self.host_log_folder)) + self._required_containers.extend(self._supporting_containers) def get_image_list(self, library: str, weblog: str) -> list[str]: @@ 
-256,6 +266,8 @@ def __init__( include_rabbitmq=False, include_mysql_db=False, include_sqlserver=False, + include_localstack=False, + include_elasticmq=False, include_otel_drop_in=False, include_buddies=False, require_api_key=False, @@ -281,6 +293,8 @@ def __init__( include_rabbitmq=include_rabbitmq, include_mysql_db=include_mysql_db, include_sqlserver=include_sqlserver, + include_localstack=include_localstack, + include_elasticmq=include_elasticmq, ) self._use_proxy_for_agent = use_proxy_for_agent @@ -388,8 +402,6 @@ def configure(self, config): interfaces.library_dotnet_managed.configure(self.host_log_folder, self.replay) for container in self.buddies: - # a little bit of python wizzardry to solve circular import - container.interface = getattr(interfaces, container.name) container.interface.configure(self.host_log_folder, self.replay) library = self.weblog_container.image.labels["system-tests-library"] diff --git a/utils/_context/_scenarios/integrations.py b/utils/_context/_scenarios/integrations.py index 962f6d647b..0f33d2725a 100644 --- a/utils/_context/_scenarios/integrations.py +++ b/utils/_context/_scenarios/integrations.py @@ -36,6 +36,7 @@ def __init__(self) -> None: "AWS_ACCESS_KEY_ID": "my-access-key", "AWS_SECRET_ACCESS_KEY": "my-access-key", "DD_TRACE_INFERRED_PROXY_SERVICES_ENABLED": "true", + "SYSTEM_TESTS_AWS_URL": "http://localstack-main:4566", "DD_IAST_CONTEXT_MODE": "GLOBAL", }, include_postgres_db=True, @@ -45,6 +46,8 @@ def __init__(self) -> None: include_rabbitmq=True, include_mysql_db=True, include_sqlserver=True, + include_localstack=True, + include_elasticmq=True, include_otel_drop_in=True, doc=( "Spawns tracer, agent, and a full set of database. " @@ -85,6 +88,10 @@ class AWSIntegrationsScenario(EndToEndScenario): 🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫🔴🚫 """ + # Since we are using real AWS queues / topics, we need a unique message to ensure we aren't consuming messages from + # other tests. A time hash is added to the message, and test consumers only stop once they find the specific message. + unique_id: str = "" + def __init__( self, name="INTEGRATIONS_AWS", doc=..., include_kafka=False, include_rabbitmq=False, include_buddies=False, + include_localstack=True, + include_elasticmq=True, ) -> None: super().__init__( name, weblog_env={ "DD_TRACE_API_VERSION": "v0.4", "AWS_ACCESS_KEY_ID": "my-access-key", "AWS_SECRET_ACCESS_KEY": "my-access-key", + "SYSTEM_TESTS_AWS_URL": "http://localstack-main:4566", }, doc=doc, include_kafka=include_kafka, include_rabbitmq=include_rabbitmq, include_buddies=include_buddies, + include_localstack=include_localstack, + include_elasticmq=include_elasticmq, scenario_groups=[ScenarioGroup.INTEGRATIONS, ScenarioGroup.ESSENTIALS], ) - # Since we are using real AWS queues / topics, we need a unique message to ensure we aren't consuming messages - from other tests. This time hash is added to the message, test consumers only stops once finding the specific
- self.unique_id = None def configure(self, config): super().configure(config) - if not self.replay: - self._check_aws_variables() self.unique_id = _get_unique_id(self.host_log_folder, replay=self.replay) def _check_aws_variables(self): @@ -127,15 +133,22 @@ def _check_aws_variables(self): class CrossedTracingLibraryScenario(EndToEndScenario): + unique_id: str = "" + def __init__(self) -> None: super().__init__( "CROSSED_TRACING_LIBRARIES", include_kafka=True, include_buddies=True, include_rabbitmq=True, + include_localstack=True, + include_elasticmq=True, doc="Spawns a buddy for each supported language of APM, requires AWS authentication.", + weblog_env={ + "SYSTEM_TESTS_AWS_URL": "http://localstack-main:4566", + "SYSTEM_TESTS_AWS_SQS_URL": "http://elasticmq:9324", + }, ) - self.unique_id = None def configure(self, config): super().configure(config) diff --git a/utils/_context/_scenarios/parametric.py b/utils/_context/_scenarios/parametric.py index d834896ad5..68743c08bc 100644 --- a/utils/_context/_scenarios/parametric.py +++ b/utils/_context/_scenarios/parametric.py @@ -72,12 +72,12 @@ class APMLibraryTestServer: container_build_context: str = "." container_port: int = 8080 - host_port: int = None # Will be assigned by get_host_port() + host_port: int | None = None # Will be assigned by get_host_port() env: dict[str, str] = dataclasses.field(default_factory=dict) volumes: dict[str, str] = dataclasses.field(default_factory=dict) - container: Container = None + container: Container | None = None class ParametricScenario(Scenario): @@ -120,7 +120,7 @@ def __init__(self, name, doc) -> None: name, doc=doc, github_workflow="parametric", - scenario_groups=[ScenarioGroup.ALL], + scenario_groups=[ScenarioGroup.ALL, ScenarioGroup.TRACER_RELEASE], ) self._parametric_tests_confs = ParametricScenario.PersistentParametricTestConf(self) @@ -209,7 +209,7 @@ def library(self): def weblog_variant(self): return f"parametric-{self.library.library}" - def _build_apm_test_server_image(self) -> str: + def _build_apm_test_server_image(self) -> None: logger.stdout("Build tested container...") apm_test_server_definition: APMLibraryTestServer = self.apm_test_server_definition @@ -227,6 +227,10 @@ def _build_apm_test_server_image(self) -> str: with open(log_path, "w+", encoding="utf-8") as log_file: # Build the container docker = shutil.which("docker") + + if docker is None: + raise FileNotFoundError("Docker not found in PATH") + root_path = ".." 
cmd = [ docker, "build", "--progress=plain", # use plain output to assist in debugging "-t", self.container_tag, f"--platform={apm_test_server_definition.container_platform}" if apm_test_server_definition.container_platform else "", "-f", dockf_path, root_path, ] ... p = subprocess.run( cmd, cwd=_get_base_directory(), text=True, input=apm_test_server_definition.container_img, stdout=log_file, stderr=log_file, env=env, check=False, ) - failure_text: str = None if p.returncode != 0: log_file.seek(0) failure_text = "".join(log_file.readlines()) @@ -298,7 +301,7 @@ def docker_run( log_file: TextIO, ) -> Generator[Container, None, None]: # Convert volumes to the format expected by the docker-py API - fixed_volumes = {} + fixed_volumes: dict[str, dict] = {} for key, value in volumes.items(): if isinstance(value, dict): fixed_volumes[key] = value @@ -378,7 +381,7 @@ def node_library_factory() -> APMLibraryTestServer: with open("./binaries/nodejs-load-from-local", encoding="utf-8") as f: path = f.read().strip(" \r\n") source = os.path.join(_get_base_directory(), path) - volumes[Path(source).resolve()] = "/volumes/dd-trace-js" + volumes[str(Path(source).resolve())] = "/volumes/dd-trace-js" except FileNotFoundError: logger.info("No local dd-trace-js found, do not mount any volume") diff --git a/utils/_context/containers.py b/utils/_context/containers.py index 1679828bfd..67e17ace51 100644 --- a/utils/_context/containers.py +++ b/utils/_context/containers.py @@ -20,6 +20,7 @@ from utils.tools import logger from utils import interfaces from utils.k8s_lib_injection.k8s_weblog import K8sWeblog +from utils.interfaces._library.core import LibraryInterfaceValidator # fake key of length 32 _FAKE_DD_API_KEY = "0123456789abcdef0123456789abcdef" @@ -100,13 +101,13 @@ def __init__( # None: container has not tried to start yet, or was not started for another reason # False: container is not healthy # True: container is healthy - self.healthy = None + self.healthy: bool | None = None self.environment = environment or {} self.kwargs = kwargs self.depends_on: list[TestedContainer] = [] self._starting_lock = RLock() - self._starting_thread = None + self._starting_thread: Thread | None = None self.stdout_interface = stdout_interface def get_image_list(self, library: str, weblog: str) -> list[str]: # noqa: ARG002 @@ -444,7 +445,7 @@ def __init__(self, image_name: str, *, local_image_only: bool): # local_image_only: boolean # True if the image is only available locally and can't be loaded from any hub - self.env = None + self.env: dict[str, str] | None = None self.labels: dict[str, str] = {} self.name = image_name self.local_image_only = local_image_only @@ -557,7 +558,7 @@ def __init__(self, host_log_folder, *, use_proxy=True, environment=None) -> None local_image_only=True, ) - self.agent_version = "" + self.agent_version: str | None = "" def configure(self, replay): super().configure(replay) @@ -607,15 +608,21 @@ def __init__(self, name, image_name, host_log_folder, host_port, trace_agent_por # "DD_TRACE_DEBUG": "true", "DD_AGENT_HOST": "proxy", "DD_TRACE_AGENT_PORT": trace_agent_port, + "SYSTEM_TESTS_AWS_URL": "http://localstack-main:4566", }, ) - self.interface = None _set_aws_auth_environment(self) + @property + def interface(self) -> LibraryInterfaceValidator: + result = getattr(interfaces, self.name) + assert result is not None, "Interface is not set" + return result + class WeblogContainer(TestedContainer): - appsec_rules_file: str + appsec_rules_file: str | None _dd_rc_tuf_root: dict = { "signed": { "_type": "root", @@ -719,6 +726,7 @@ def __init__( if tracer_sampling_rate: base_environment["DD_TRACE_SAMPLE_RATE"] = str(tracer_sampling_rate) + base_environment["DD_TRACE_SAMPLING_RULES"] = json.dumps([{"sample_rate": tracer_sampling_rate}]) if use_proxy: # set the tracer to send data to runner (it will forward them to the agent) @@ -726,7
+734,7 @@ def __init__( base_environment["DD_TRACE_AGENT_PORT"] = self.trace_agent_port else: base_environment["DD_AGENT_HOST"] = "agent" - base_environment["DD_TRACE_AGENT_PORT"] = AgentContainer.apm_receiver_port + base_environment["DD_TRACE_AGENT_PORT"] = str(AgentContainer.apm_receiver_port) # overwrite values with those set in the scenario environment = base_environment | (environment or {}) @@ -757,7 +765,7 @@ def __init__( self.additional_trace_header_tags = additional_trace_header_tags self.weblog_variant = "" - self._library: LibraryVersion = None + self._library: LibraryVersion | None = None @property def trace_agent_port(self): @@ -777,7 +785,7 @@ def _get_image_list_from_dockerfile(dockerfile) -> list[str]: def get_image_list(self, library: str | None, weblog: str | None) -> list[str]: """Parse the Dockerfile and extract all images reference in a FROM section""" - result = [] + result: list[str] = [] if not library or not weblog: return result @@ -820,7 +828,12 @@ def configure(self, replay): if len(self.additional_trace_header_tags) != 0: self.environment["DD_TRACE_HEADER_TAGS"] += f',{",".join(self.additional_trace_header_tags)}' - self.appsec_rules_file = (self.image.env | self.environment).get("DD_APPSEC_RULES", None) + if "DD_APPSEC_RULES" in self.environment: + self.appsec_rules_file = self.environment["DD_APPSEC_RULES"] + elif self.image.env is not None and "DD_APPSEC_RULES" in self.image.env: + self.appsec_rules_file = self.image.env["DD_APPSEC_RULES"] + else: + self.appsec_rules_file = None # Workaround: Once the dd-trace-go fix is merged that avoids a go panic for # DD_TRACE_PROPAGATION_EXTRACT_FIRST=true when context propagation fails, @@ -876,10 +889,12 @@ def post_start(self): @property def library(self) -> LibraryVersion: + assert self._library is not None, "Library version is not set" return self._library @property def uds_socket(self): + assert self.image.env is not None, "No env set" return self.image.env.get("DD_APM_RECEIVER_SOCKET", None) @property @@ -993,6 +1008,41 @@ def __init__(self, host_log_folder) -> None: ) +class ElasticMQContainer(TestedContainer): + def __init__(self, host_log_folder) -> None: + super().__init__( + image_name="softwaremill/elasticmq-native:1.6.11", + name="elasticmq", + host_log_folder=host_log_folder, + environment={"ELASTICMQ_OPTS": "-Dnode-address.hostname=0.0.0.0"}, + ports={9324: 9324}, + volumes={"/var/run/docker.sock": {"bind": "/var/run/docker.sock", "mode": "rw"}}, + allow_old_container=True, + ) + + +class LocalstackContainer(TestedContainer): + def __init__(self, host_log_folder) -> None: + super().__init__( + image_name="localstack/localstack:4.1", + name="localstack-main", + environment={ + "LOCALSTACK_SERVICES": "kinesis,sqs,sns,xray", + "EXTRA_CORS_ALLOWED_HEADERS": "x-amz-request-id,x-amzn-requestid,x-amzn-trace-id", + "EXTRA_CORS_EXPOSE_HEADERS": "x-amz-request-id,x-amzn-requestid,x-amzn-trace-id", + "AWS_DEFAULT_REGION": "us-east-1", + "FORCE_NONINTERACTIVE": "true", + "START_WEB": "0", + "DEBUG": "1", + "SQS_PROVIDER": "elasticmq", + "DOCKER_HOST": "unix:///var/run/docker.sock", + }, + host_log_folder=host_log_folder, + ports={"4566": ("127.0.0.1", 4566)}, + volumes={"/var/run/docker.sock": {"bind": "/var/run/docker.sock", "mode": "rw"}}, + ) + + class MySqlContainer(SqlDbTestedContainer): def __init__(self, host_log_folder) -> None: super().__init__( @@ -1177,7 +1227,7 @@ def __init__(self, host_log_folder) -> None: def get_env(self, env_var): """Get env variables from the container""" - env = self.image.env
| self.environment + env = (self.image.env or {}) | self.environment return env.get(env_var) diff --git a/utils/_context/library_version.py b/utils/_context/library_version.py index 9c4761cba2..4e23d0bb3c 100644 --- a/utils/_context/library_version.py +++ b/utils/_context/library_version.py @@ -51,7 +51,7 @@ def __ge__(self, other): class LibraryVersion: - known_versions = defaultdict(set) + known_versions: dict = defaultdict(set) def add_known_version(self, version, library=None): library = self.library if library is None else library diff --git a/utils/_decorators.py b/utils/_decorators.py index 882438a2ff..3cc109e4d9 100644 --- a/utils/_decorators.py +++ b/utils/_decorators.py @@ -57,7 +57,7 @@ def _ensure_jira_ticket_as_reason(item, reason: str): def _add_pytest_marker(item, reason, marker): if inspect.isfunction(item) or inspect.isclass(item): if not hasattr(item, "pytestmark"): - item.pytestmark = [] + item.pytestmark = [] # type: ignore[attr-defined] item.pytestmark.append(marker(reason=reason)) else: diff --git a/utils/_features.py b/utils/_features.py index ff70aaca8b..17f42f55a7 100644 --- a/utils/_features.py +++ b/utils/_features.py @@ -2291,6 +2291,15 @@ def iast_stack_trace(test_object): pytest.mark.features(feature_id=329)(test_object) return test_object + @staticmethod + def iast_extended_location(test_object): + """IAST: Extended location data + + https://feature-parity.us1.prod.dog/#/?feature=364 + """ + pytest.mark.features(feature_id=364)(test_object) + return test_object + @staticmethod def djm_ssi_k8s(test_object): """Data Jobs Monitoring: Java lib auto instrumentation for Spark applications on K8s. @@ -2351,7 +2360,7 @@ def context_propagation_extract_behavior(test_object): https://feature-parity.us1.prod.dog/#/?feature=353 """ - pytest.mark.features(feature_id=343)(test_object) + pytest.mark.features(feature_id=353)(test_object) return test_object @staticmethod @@ -2390,5 +2399,23 @@ def otel_propagators_api(test_object): pytest.mark.features(feature_id=361)(test_object) return test_object + @staticmethod + def stable_configuration_support(test_object): + """Enforces that basic stable configuration support exists + + https://feature-parity.us1.prod.dog/#/?feature=365 + """ + pytest.mark.features(feature_id=365)(test_object) + return test_object + + @staticmethod + def single_span_ingestion_control(test_object): + """Enforces that single span ingestion control is supported + + https://feature-parity.us1.prod.dog/#/?feature=366 + """ + pytest.mark.features(feature_id=366)(test_object) + return test_object + features = _Features() diff --git a/utils/_remote_config.py b/utils/_remote_config.py index bf1b5f1cb5..933209ceb9 100644 --- a/utils/_remote_config.py +++ b/utils/_remote_config.py @@ -65,7 +65,7 @@ def send_state( client_configs = raw_payload.get("client_configs", []) - current_states = {} + current_states: dict[str, Any] = {} version = None targets = json.loads(base64.b64decode(raw_payload["targets"])) version = targets["signed"]["version"] @@ -132,7 +132,7 @@ def send_sequential_commands(commands: list[dict], *, wait_for_all_command: bool if not wait_for_all_command: return - counts_by_runtime_id = {} + counts_by_runtime_id: dict[str, int] = {} def all_payload_sent(data) -> bool: if data["path"] != "/v0.7/config": @@ -207,9 +207,7 @@ def _build_base_command(path_payloads: dict[str, dict | list], version: int): payload_64 = _json_to_base64(payload) payload_length = len(base64.b64decode(payload_64)) - target = {"custom": {"v": 1}, "hashes": {"sha256": ""},
"length": 0} - target["hashes"]["sha256"] = _sha256(payload_64) - target["length"] = payload_length + target = {"custom": {"v": 1}, "hashes": {"sha256": _sha256(payload_64)}, "length": payload_length} signed["signed"]["targets"][path] = target target_file = {"path": path, "raw": payload_64} @@ -234,8 +232,9 @@ def build_debugger_command(probes: list | None, version: int): def build_symdb_command(): - path_payloads = {"datadog/2/LIVE_DEBUGGING_SYMBOL_DB/symDb/config": {"upload_symbols": True}} - return _build_base_command(path_payloads, version=1) + return _build_base_command( + path_payloads={"datadog/2/LIVE_DEBUGGING_SYMBOL_DB/symDb/config": {"upload_symbols": True}}, version=1 + ) def send_debugger_command(probes: list, version: int) -> dict: @@ -274,6 +273,8 @@ def __init__(self, path: str, config, config_file_version=None) -> None: self.raw_sha256 = hashlib.sha256(base64.b64decode(self.raw)).hexdigest() else: stored_config = self._store.get(path, None) + if stored_config is None: + raise ValueError(f"Config for {path} not found") self.raw_length = stored_config.raw_length self.raw_sha256 = stored_config.raw_sha256 diff --git a/utils/_weblog.py b/utils/_weblog.py index 3c2d1263a3..fff9fc15bb 100644 --- a/utils/_weblog.py +++ b/utils/_weblog.py @@ -18,7 +18,7 @@ import utils.grpc.weblog_pb2_grpc as grpcapi # monkey patching header validation in requests module, as we want to be able to send anything to weblog -requests.utils._validate_header_part = lambda *args, **kwargs: None # noqa: ARG005, SLF001 +requests.utils._validate_header_part = lambda *args, **kwargs: None # type: ignore[attr-defined] # noqa: ARG005, SLF001 class ResponseEncoder(json.JSONEncoder): @@ -49,9 +49,9 @@ def serialize(self) -> dict: class HttpRequest: def __init__(self, data): - self.headers = CaseInsensitiveDict(data.get("headers", {})) - self.method = data["method"] - self.url = data["url"] + self.headers: CaseInsensitiveDict = CaseInsensitiveDict(data.get("headers", {})) + self.method: str = data["method"] + self.url: str = data["url"] self.params = data["params"] def __repr__(self) -> str: @@ -63,7 +63,7 @@ def __init__(self, data): self._data = data self.request = HttpRequest(data["request"]) self.status_code = data["status_code"] - self.headers = CaseInsensitiveDict(data.get("headers", {})) + self.headers: CaseInsensitiveDict = CaseInsensitiveDict(data.get("headers", {})) self.text = data["text"] self.cookies = data["cookies"] @@ -143,13 +143,9 @@ def request( else: url = self._get_url(path, domain, port) - response_data = { - "request": {"method": method, "url": url, "headers": headers, "params": params}, - "status_code": None, - "headers": {}, - "text": None, - "cookies": None, - } + status_code = None + response_headers: CaseInsensitiveDict = CaseInsensitiveDict() + text = None timeout = kwargs.pop("timeout", 5) try: @@ -159,18 +155,25 @@ def request( logger.debug(f"Sending request {rid}: {method} {url}") s = requests.Session() - r = s.send(r, timeout=timeout, stream=stream, allow_redirects=allow_redirects) - response_data["status_code"] = r.status_code - response_data["headers"] = r.headers - response_data["text"] = r.text - response_data["cookies"] = requests.utils.dict_from_cookiejar(s.cookies) + response = s.send(r, timeout=timeout, stream=stream, allow_redirects=allow_redirects) + status_code = response.status_code + response_headers = response.headers + text = response.text except Exception as e: logger.error(f"Request {rid} raise an error: {e}") else: - logger.debug(f"Request {rid}: {r.status_code}") 
- - return HttpResponse(response_data) + logger.debug(f"Request {rid}: {response.status_code}") + + return HttpResponse( + { + "request": {"method": method, "url": url, "headers": headers, "params": params}, + "status_code": status_code, + "headers": response_headers, + "text": text, + "cookies": requests.utils.dict_from_cookiejar(s.cookies), + } + ) def warmup_request(self, domain=None, port=None, timeout=10): requests.get(self._get_url("/", domain, port), timeout=timeout) @@ -206,25 +209,21 @@ def grpc(self, string_value, *, streaming=False): logger.debug(f"Sending grpc request {rid}") - request = pb.Value(string_value=string_value) # pylint: disable=no-member - - response_data = { - "request": {"rid": rid, "string_value": string_value}, - } + request = pb.Value(string_value=string_value) + response_data = None try: if streaming: for response in _grpc_client.ServerStream(request): - response_data["response"] = response.string_value + response_data = response.string_value else: response = _grpc_client.Unary(request) - response_data["response"] = response.string_value + response_data = response.string_value except Exception as e: logger.error(f"Request {rid} raise an error: {e}") - response_data["response"] = None - return GrpcResponse(response_data) + return GrpcResponse({"request": {"rid": rid, "string_value": string_value}, "response": response_data}) weblog = _Weblog() diff --git a/utils/build/build_python_base_images.sh b/utils/build/build_python_base_images.sh index 7e3696b429..15265d30b5 100755 --- a/utils/build/build_python_base_images.sh +++ b/utils/build/build_python_base_images.sh @@ -4,19 +4,19 @@ -docker buildx build --load --progress=plain -f utils/build/docker/python/django-py3.13.base.Dockerfile -t datadog/system-tests:django-py3.13.base-v0 . +docker buildx build --load --progress=plain -f utils/build/docker/python/django-py3.13.base.Dockerfile -t datadog/system-tests:django-py3.13.base-v1 . docker buildx build --load --progress=plain -f utils/build/docker/python/fastapi.base.Dockerfile -t datadog/system-tests:fastapi.base-v4 . -docker buildx build --load --progress=plain -f utils/build/docker/python/python3.12.base.Dockerfile -t datadog/system-tests:python3.12.base-v5 . +docker buildx build --load --progress=plain -f utils/build/docker/python/python3.12.base.Dockerfile -t datadog/system-tests:python3.12.base-v6 . docker buildx build --load --progress=plain -f utils/build/docker/python/django-poc.base.Dockerfile -t datadog/system-tests:django-poc.base-v4 . -docker buildx build --load --progress=plain -f utils/build/docker/python/flask-poc.base.Dockerfile -t datadog/system-tests:flask-poc.base-v7 . +docker buildx build --load --progress=plain -f utils/build/docker/python/flask-poc.base.Dockerfile -t datadog/system-tests:flask-poc.base-v8 . docker buildx build --load --progress=plain -f utils/build/docker/python/uwsgi-poc.base.Dockerfile -t datadog/system-tests:uwsgi-poc.base-v4 . 
if [ "$1" = "--push" ]; then - docker push datadog/system-tests:django-py3.13.base-v0 + docker push datadog/system-tests:django-py3.13.base-v1 docker push datadog/system-tests:fastapi.base-v4 - docker push datadog/system-tests:python3.12.base-v5 + docker push datadog/system-tests:python3.12.base-v6 docker push datadog/system-tests:django-poc.base-v4 - docker push datadog/system-tests:flask-poc.base-v7 + docker push datadog/system-tests:flask-poc.base-v8 docker push datadog/system-tests:uwsgi-poc.base-v4 fi diff --git a/utils/build/docker/dotnet/weblog/Controllers/ExceptionReplayController.cs b/utils/build/docker/dotnet/weblog/Controllers/ExceptionReplayController.cs index 3d4ecfd459..9b5af67b8d 100644 --- a/utils/build/docker/dotnet/weblog/Controllers/ExceptionReplayController.cs +++ b/utils/build/docker/dotnet/weblog/Controllers/ExceptionReplayController.cs @@ -22,11 +22,19 @@ public IActionResult ExceptionReplaySimple() [HttpGet("recursion")] [Consumes("application/json", "application/xml")] + [System.Runtime.CompilerServices.MethodImpl(System.Runtime.CompilerServices.MethodImplOptions.NoInlining)] public IActionResult exceptionReplayRecursion(int depth) { return exceptionReplayRecursionHelper(depth, depth); } + [HttpGet("recursion_inline")] + [Consumes("application/json", "application/xml")] + public IActionResult exceptionReplayRecursion_inline(int depth) + { + return exceptionReplayRecursionHelper(depth, depth); + } + private IActionResult exceptionReplayRecursionHelper(int originalDepth, int currentDepth) { if (currentDepth > 0) diff --git a/utils/build/docker/dotnet/weblog/Endpoints/DsmEndpoint.cs b/utils/build/docker/dotnet/weblog/Endpoints/DsmEndpoint.cs index 56b64c0040..e1eb9120c1 100644 --- a/utils/build/docker/dotnet/weblog/Endpoints/DsmEndpoint.cs +++ b/utils/build/docker/dotnet/weblog/Endpoints/DsmEndpoint.cs @@ -170,7 +170,19 @@ class SqsProducer { public static async Task DoWork(string queue, string message) { - var sqsClient = new AmazonSQSClient(); + string awsUrl = Environment.GetEnvironmentVariable("SYSTEM_TESTS_AWS_URL"); + + IAmazonSQS sqsClient; + if (!string.IsNullOrEmpty(awsUrl)) + { + // If SYSTEM_TESTS_AWS_URL is set, use it for ServiceURL + sqsClient = new AmazonSQSClient(new AmazonSQSConfig { ServiceURL = awsUrl }); + } + else + { + // If SYSTEM_TESTS_AWS_URL is not set, create a default client + sqsClient = new AmazonSQSClient(); + } // create queue Console.WriteLine($"[SQS] Produce: Creating queue {queue}"); CreateQueueResponse responseCreate = await sqsClient.CreateQueueAsync(queue); @@ -187,7 +199,19 @@ class SqsConsumer { public static async Task DoWork(string queue, string message) { - var sqsClient = new AmazonSQSClient(); + string awsUrl = Environment.GetEnvironmentVariable("SYSTEM_TESTS_AWS_URL"); + + IAmazonSQS sqsClient; + if (!string.IsNullOrEmpty(awsUrl)) + { + // If awsUrl is set, use it for ServiceURL + sqsClient = new AmazonSQSClient(new AmazonSQSConfig { ServiceURL = awsUrl }); + } + else + { + // If awsUrl is not set, create a default client + sqsClient = new AmazonSQSClient(); + } // Create queue Console.WriteLine($"[SQS] Consume: Creating queue {queue}"); CreateQueueResponse responseCreate = await sqsClient.CreateQueueAsync(queue); diff --git a/utils/build/docker/dotnet/weblog/Endpoints/MessagingEndpoints.cs b/utils/build/docker/dotnet/weblog/Endpoints/MessagingEndpoints.cs index cd5b5598c2..6befb11e53 100644 --- a/utils/build/docker/dotnet/weblog/Endpoints/MessagingEndpoints.cs +++ 
b/utils/build/docker/dotnet/weblog/Endpoints/MessagingEndpoints.cs @@ -133,7 +133,19 @@ private static bool RabbitConsume(string queue, TimeSpan timeout) private static async Task SqsProduce(string queue, string message) { - var sqsClient = new AmazonSQSClient(); + string awsUrl = Environment.GetEnvironmentVariable("SYSTEM_TESTS_AWS_URL"); + + IAmazonSQS sqsClient; + if (!string.IsNullOrEmpty(awsUrl)) + { + // If SYSTEM_TESTS_AWS_URL is set, use it for ServiceURL + sqsClient = new AmazonSQSClient(new AmazonSQSConfig { ServiceURL = awsUrl }); + } + else + { + // If SYSTEM_TESTS_AWS_URL is not set, create a default client + sqsClient = new AmazonSQSClient(); + } var responseCreate = await sqsClient.CreateQueueAsync(queue); var qUrl = responseCreate.QueueUrl; await sqsClient.SendMessageAsync(qUrl, message); @@ -143,7 +155,20 @@ private static async Task SqsProduce(string queue, string message) private static async Task SqsConsume(string queue, TimeSpan timeout, string message) { Console.WriteLine($"consuming one message from SQS queue {queue} in max {(int)timeout.TotalSeconds} seconds"); - var sqsClient = new AmazonSQSClient(); + + string awsUrl = Environment.GetEnvironmentVariable("SYSTEM_TESTS_AWS_URL"); + + IAmazonSQS sqsClient; + if (!string.IsNullOrEmpty(awsUrl)) + { + // If SYSTEM_TESTS_AWS_URL is set, use it for ServiceURL + sqsClient = new AmazonSQSClient(new AmazonSQSConfig { ServiceURL = awsUrl }); + } + else + { + // If SYSTEM_TESTS_AWS_URL is not set, create a default client + sqsClient = new AmazonSQSClient(); + } var responseCreate = await sqsClient.CreateQueueAsync(queue); var qUrl = responseCreate.QueueUrl; diff --git a/utils/build/docker/java/akka-http/pom.xml b/utils/build/docker/java/akka-http/pom.xml index 6489065085..46b3a3f9af 100644 --- a/utils/build/docker/java/akka-http/pom.xml +++ b/utils/build/docker/java/akka-http/pom.xml @@ -96,6 +96,26 @@ spray-json_2.13 1.3.6 + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/java/iast-common/pom.xml b/utils/build/docker/java/iast-common/pom.xml index aa5db23f23..a26dc2d1aa 100644 --- a/utils/build/docker/java/iast-common/pom.xml +++ b/utils/build/docker/java/iast-common/pom.xml @@ -34,6 +34,30 @@ 6.0.8 true + + org.apache.commons + commons-lang3 + 3.17.0 + true + + + javax.mail + javax.mail-api + 1.6.2 + true + + + javax.activation + activation + 1.1.1 + true + + + com.sun.mail + javax.mail + 1.6.2 + true + diff --git a/utils/build/docker/java/iast-common/src/main/java/com/datadoghq/system_tests/iast/utils/EmailExamples.java b/utils/build/docker/java/iast-common/src/main/java/com/datadoghq/system_tests/iast/utils/EmailExamples.java new file mode 100644 index 0000000000..d088251911 --- /dev/null +++ b/utils/build/docker/java/iast-common/src/main/java/com/datadoghq/system_tests/iast/utils/EmailExamples.java @@ -0,0 +1,31 @@ +package com.datadoghq.system_tests.iast.utils; + +import com.datadoghq.system_tests.iast.utils.mock.MockTransport; + +import javax.mail.Message; +import javax.mail.MessagingException; +import javax.mail.Session; +import javax.mail.Provider; +import javax.mail.internet.InternetAddress; +import javax.mail.internet.MimeMessage; +import java.util.Properties; + + +public class EmailExamples { + + public void mail(final String emailContent) throws MessagingException { + Session session = Session.getDefaultInstance(new Properties()); + Provider provider = + new 
Provider( + Provider.Type.TRANSPORT, "smtp", MockTransport.class.getName(), "MockTransport", "1.0"); + session.setProvider(provider); + Message email = new MimeMessage(session); + email.setContent(emailContent, "text/html"); + email.setRecipient(Message.RecipientType.TO, new InternetAddress("abc@datadoghq.com")); + + MockTransport.send(email); + + } + + +} diff --git a/utils/build/docker/java/iast-common/src/main/java/com/datadoghq/system_tests/iast/utils/mock/MockTransport.java b/utils/build/docker/java/iast-common/src/main/java/com/datadoghq/system_tests/iast/utils/mock/MockTransport.java new file mode 100644 index 0000000000..6645e0f5a7 --- /dev/null +++ b/utils/build/docker/java/iast-common/src/main/java/com/datadoghq/system_tests/iast/utils/mock/MockTransport.java @@ -0,0 +1,26 @@ +package com.datadoghq.system_tests.iast.utils.mock; +import javax.mail.Message; +import javax.mail.MessagingException; +import javax.mail.Session; +import javax.mail.Transport; +import javax.mail.URLName; +import javax.mail.Address; + +public class MockTransport extends Transport { + public MockTransport(Session session, URLName urlname) { + super(session, urlname); + } + + public void sendMessage(Message msg, Address[] addresses) throws MessagingException { + this.notifyTransportListeners(1, addresses, new Address[0], new Address[0], msg); + + } + + @Override + public void connect() { + this.setConnected(true); + this.notifyConnectionListeners(1); + } + + public synchronized void connect(String host, int port, String user, String password) {} + } \ No newline at end of file diff --git a/utils/build/docker/java/jersey-grizzly2/pom.xml b/utils/build/docker/java/jersey-grizzly2/pom.xml index 4972445c28..faf80c2758 100644 --- a/utils/build/docker/java/jersey-grizzly2/pom.xml +++ b/utils/build/docker/java/jersey-grizzly2/pom.xml @@ -89,6 +89,26 @@ jackson-databind 2.12.3 + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/java/parametric/pom.xml b/utils/build/docker/java/parametric/pom.xml index 711b011fd2..3ca801a4fa 100644 --- a/utils/build/docker/java/parametric/pom.xml +++ b/utils/build/docker/java/parametric/pom.xml @@ -55,6 +55,26 @@ opentelemetry-api ${opentelemetry.version} + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/java/play/pom.xml b/utils/build/docker/java/play/pom.xml index f26eaa1ca2..45e397169a 100644 --- a/utils/build/docker/java/play/pom.xml +++ b/utils/build/docker/java/play/pom.xml @@ -81,6 +81,26 @@ hsqldb 2.7.1 + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/java/ratpack/pom.xml b/utils/build/docker/java/ratpack/pom.xml index 2d738d1d92..2dfe32f183 100644 --- a/utils/build/docker/java/ratpack/pom.xml +++ b/utils/build/docker/java/ratpack/pom.xml @@ -66,6 +66,26 @@ jackson-core LATEST + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/java/resteasy-netty3/pom.xml b/utils/build/docker/java/resteasy-netty3/pom.xml index b37f014641..92a701e748 100644 --- 
a/utils/build/docker/java/resteasy-netty3/pom.xml +++ b/utils/build/docker/java/resteasy-netty3/pom.xml @@ -87,7 +87,26 @@ jackson-core 2.17.1 - + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/java/spring-boot-3-native.Dockerfile b/utils/build/docker/java/spring-boot-3-native.Dockerfile index bb2d7fc9ca..d3c6b80c86 100644 --- a/utils/build/docker/java/spring-boot-3-native.Dockerfile +++ b/utils/build/docker/java/spring-boot-3-native.Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/graalvm/native-image-community:21.0.0 as build +FROM ghcr.io/graalvm/native-image-community:22.0.0 as build ENV JAVA_TOOL_OPTIONS="-Djava.net.preferIPv4Stack=true" diff --git a/utils/build/docker/java/spring-boot-3-native/pom.xml b/utils/build/docker/java/spring-boot-3-native/pom.xml index 1aa10676f7..ed432c1164 100644 --- a/utils/build/docker/java/spring-boot-3-native/pom.xml +++ b/utils/build/docker/java/spring-boot-3-native/pom.xml @@ -54,6 +54,26 @@ javax.servlet-api 4.0.1 + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/java/spring-boot/pom.xml b/utils/build/docker/java/spring-boot/pom.xml index 14e2d0cb97..212234b216 100644 --- a/utils/build/docker/java/spring-boot/pom.xml +++ b/utils/build/docker/java/spring-boot/pom.xml @@ -199,6 +199,26 @@ kinesis 2.17.85 + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/App.java b/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/App.java index 4bbabf87ff..99cf8b5f5f 100644 --- a/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/App.java +++ b/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/App.java @@ -426,7 +426,9 @@ ResponseEntity sqsProduce( @RequestParam(required = true) String queue, @RequestParam(required = true) String message ) { - SqsConnector sqs = new SqsConnector(queue); + String systemTestsAwsUrl = System.getenv("SYSTEM_TESTS_AWS_URL"); + + SqsConnector sqs = new SqsConnector(queue, systemTestsAwsUrl); try { sqs.produceMessageWithoutNewThread(message); } catch (Exception e) { @@ -443,7 +445,9 @@ ResponseEntity sqsConsume( @RequestParam(required = false) Integer timeout, @RequestParam(required = true) String message ) { - SqsConnector sqs = new SqsConnector(queue); + String systemTestsAwsUrl = System.getenv("SYSTEM_TESTS_AWS_URL"); + + SqsConnector sqs = new SqsConnector(queue, systemTestsAwsUrl); if (timeout == null) timeout = 60; boolean consumed = false; try { @@ -462,8 +466,10 @@ ResponseEntity snsProduce( @RequestParam(required = true) String topic, @RequestParam(required = true) String message ) { + String systemTestsAwsUrl = System.getenv("SYSTEM_TESTS_AWS_URL"); + SnsConnector sns = new SnsConnector(topic); - SqsConnector sqs = new SqsConnector(queue); + SqsConnector sqs = new SqsConnector(queue, systemTestsAwsUrl); try { sns.produceMessageWithoutNewThread(message, sqs); } catch (Exception e) { @@ -480,7 +486,9 @@ ResponseEntity snsConsume( @RequestParam(required = false) Integer timeout, 
@RequestParam(required = true) String message ) { - SqsConnector sqs = new SqsConnector(queue); + String systemTestsAwsUrl = System.getenv("SYSTEM_TESTS_AWS_URL"); + + SqsConnector sqs = new SqsConnector(queue, systemTestsAwsUrl); if (timeout == null) timeout = 60; boolean consumed = false; try { @@ -645,7 +653,9 @@ String publishToKafka( return "failed to start consuming message"; } } else if ("sqs".equals(integration)) { - SqsConnector sqs = new SqsConnector(queue); + String systemTestsAwsUrl = System.getenv("SYSTEM_TESTS_AWS_URL"); + + SqsConnector sqs = new SqsConnector(queue, systemTestsAwsUrl); try { Thread produceThread = sqs.startProducingMessage(message); produceThread.join(this.PRODUCE_CONSUME_THREAD_TIMEOUT); @@ -663,8 +673,10 @@ String publishToKafka( return "[SQS] failed to start consuming message"; } } else if ("sns".equals(integration)) { + String systemTestsAwsUrl = System.getenv("SYSTEM_TESTS_AWS_URL"); + SnsConnector sns = new SnsConnector(topic); - SqsConnector sqs = new SqsConnector(queue); + SqsConnector sqs = new SqsConnector(queue, systemTestsAwsUrl); try { Thread produceThread = sns.startProducingMessage(message, sqs); produceThread.join(this.PRODUCE_CONSUME_THREAD_TIMEOUT); diff --git a/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/AppSecIast.java b/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/AppSecIast.java index 62754180c8..eabe9f6465 100644 --- a/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/AppSecIast.java +++ b/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/AppSecIast.java @@ -9,6 +9,7 @@ import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; +import javax.mail.MessagingException; import javax.naming.Context; import javax.naming.NamingException; import javax.naming.directory.InitialDirContext; @@ -21,6 +22,8 @@ import java.io.IOException; import java.util.Hashtable; +import org.apache.commons.lang3.StringEscapeUtils; + @RestController @RequestMapping("/iast") public class AppSecIast { @@ -38,6 +41,7 @@ public class AppSecIast { private final HardcodedSecretExamples hardcodedSecretExamples; private final ReflectionExamples reflectionExamples; private final DeserializationExamples deserializationExamples; + private final EmailExamples emailExamples; public AppSecIast(final DataSource dataSource) { @@ -52,6 +56,7 @@ public AppSecIast(final DataSource dataSource) { this.hardcodedSecretExamples = new HardcodedSecretExamples(); this.reflectionExamples = new ReflectionExamples(); this.deserializationExamples = new DeserializationExamples(); + this.emailExamples = new EmailExamples(); } @RequestMapping("/hardcoded_secrets/test_insecure") @@ -452,6 +457,17 @@ void scSOverloadedInsecure(final ServletRequest request, final ServletResponse cmdExamples.insecureCmd(sanitized); } + @PostMapping("/email_html_injection/test_insecure") + void emailHtmlInjectionInsecure(final HttpServletRequest request) throws MessagingException { + String email = request.getParameter("username"); + emailExamples.mail(email); + } + + @PostMapping("/email_html_injection/test_secure") + void emailHtmlInjectionSecure(final HttpServletRequest request) throws MessagingException { + String email = request.getParameter("username"); + emailExamples.mail(StringEscapeUtils.escapeHtml4(email)); + } /** * TODO: Ldap is failing to startup in native image this method 
ensures it's started lazily diff --git a/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/aws/KinesisConnector.java b/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/aws/KinesisConnector.java index bde535725a..c09e6d6405 100644 --- a/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/aws/KinesisConnector.java +++ b/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/aws/KinesisConnector.java @@ -7,6 +7,7 @@ import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.kinesis.KinesisClient; +import software.amazon.awssdk.services.kinesis.KinesisClientBuilder; import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest; import software.amazon.awssdk.services.kinesis.model.CreateStreamResponse; import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest; @@ -35,10 +36,19 @@ public KinesisConnector(String stream){ } public KinesisClient createKinesisClient() { - KinesisClient kinesisClient = KinesisClient.builder() + KinesisClientBuilder builder = KinesisClient.builder() .region(this.region) - .credentialsProvider(EnvironmentVariableCredentialsProvider.create()) - .build(); + .credentialsProvider(EnvironmentVariableCredentialsProvider.create()); + + // Read the SYSTEM_TESTS_AWS_URL environment variable + String systemTestsAwsUrl = System.getenv("SYSTEM_TESTS_AWS_URL"); + + // Only override endpoint if SYSTEM_TESTS_AWS_URL is set + if (systemTestsAwsUrl != null && !systemTestsAwsUrl.isEmpty()) { + builder.endpointOverride(URI.create(systemTestsAwsUrl)); + } + + KinesisClient kinesisClient = builder.build(); return kinesisClient; } diff --git a/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/aws/SnsConnector.java b/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/aws/SnsConnector.java index d4c0d1a478..d24b3424a0 100644 --- a/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/aws/SnsConnector.java +++ b/utils/build/docker/java/spring-boot/src/main/java/com/datadoghq/system_tests/springboot/aws/SnsConnector.java @@ -7,6 +7,7 @@ import software.amazon.awssdk.services.sqs.model.QueueAttributeName; import software.amazon.awssdk.services.sqs.model.SetQueueAttributesRequest; import software.amazon.awssdk.services.sns.SnsClient; +import software.amazon.awssdk.services.sns.SnsClientBuilder; import software.amazon.awssdk.services.sns.model.CreateTopicRequest; import software.amazon.awssdk.services.sns.model.CreateTopicResponse; import software.amazon.awssdk.services.sns.model.PublishRequest; @@ -30,10 +31,19 @@ public SnsConnector(String topic){ } private static SnsClient createSnsClient() { - SnsClient snsClient = SnsClient.builder() + SnsClientBuilder builder = SnsClient.builder() .region(Region.US_EAST_1) - .credentialsProvider(EnvironmentVariableCredentialsProvider.create()) - .build(); + .credentialsProvider(EnvironmentVariableCredentialsProvider.create()); + + // Read the SYSTEM_TESTS_AWS_URL environment variable + String systemTestsAwsUrl = System.getenv("SYSTEM_TESTS_AWS_URL"); + + // Only override endpoint if SYSTEM_TESTS_AWS_URL is set + if (systemTestsAwsUrl != null && !systemTestsAwsUrl.isEmpty()) { + builder.endpointOverride(URI.create(systemTestsAwsUrl)); + } + + SnsClient snsClient = builder.build(); return snsClient; } diff --git 
a/utils/build/docker/java/vertx3/pom.xml b/utils/build/docker/java/vertx3/pom.xml index 70854dcf2b..ae2167ecde 100644 --- a/utils/build/docker/java/vertx3/pom.xml +++ b/utils/build/docker/java/vertx3/pom.xml @@ -75,6 +75,26 @@ okhttp 3.0.0 + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/java/vertx4/pom.xml b/utils/build/docker/java/vertx4/pom.xml index 7762cfb024..4042ac4acb 100644 --- a/utils/build/docker/java/vertx4/pom.xml +++ b/utils/build/docker/java/vertx4/pom.xml @@ -75,6 +75,26 @@ okhttp 3.0.0 + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/java_otel/spring-boot/pom.xml b/utils/build/docker/java_otel/spring-boot/pom.xml index 14e2d0cb97..212234b216 100644 --- a/utils/build/docker/java_otel/spring-boot/pom.xml +++ b/utils/build/docker/java_otel/spring-boot/pom.xml @@ -199,6 +199,26 @@ kinesis 2.17.85 + + org.apache.commons + commons-lang3 + 3.17.0 + + + javax.mail + javax.mail-api + 1.6.2 + + + javax.activation + activation + 1.1.1 + + + com.sun.mail + javax.mail + 1.6.2 + diff --git a/utils/build/docker/nodejs/express/graphql.js b/utils/build/docker/nodejs/express/graphql.js index 6c39dc0d07..958871f9a2 100644 --- a/utils/build/docker/nodejs/express/graphql.js +++ b/utils/build/docker/nodejs/express/graphql.js @@ -1,6 +1,6 @@ 'use strict' -const { ApolloServer, gql } = require('apollo-server-express') +const { ApolloServer, gql, ApolloError } = require('apollo-server-express') const { readFileSync } = require('fs') const users = [ @@ -32,16 +32,21 @@ const typeDefs = gql` id: Int name: String } +` - type Error { - message: String - extensions: [Extension] - } - - type Extension { - key: String - value: String - }` +// Custom GraphQL error class +class CustomGraphQLError extends ApolloError { + constructor (message, code, properties) { + super(message, properties) + this.extensions.code = code + this.extensions.int = 1 + this.extensions.float = 1.1 + this.extensions.str = '1' + this.extensions.bool = true + this.extensions.other = [1, 'foo'] + this.extensions.not_captured = 'nope' + } +} function getUser (parent, args) { return users.find((item) => args.id === item.id) @@ -61,7 +66,7 @@ function testInjection (parent, args) { } function withError (parent, args) { - throw new Error('test error') + throw new CustomGraphQLError('test error', 'CUSTOM_USER_DEFINED_ERROR', 'Some extra context about the error.') } const resolvers = { @@ -73,22 +78,8 @@ const resolvers = { } } -// Custom error formatting -const formatError = (error) => { - return { - message: error.message, - extensions: [ - { key: 'int-1', value: '1' }, - { key: 'str-1', value: '1' }, - { key: 'array-1-2', value: [1, '2'] }, - { key: 'empty', value: 'empty string' }, - { key: 'comma', value: 'comma' } - ] - } -} - module.exports = async function (app) { - const server = new ApolloServer({ typeDefs, resolvers, formatError }) + const server = new ApolloServer({ typeDefs, resolvers }) await server.start() server.applyMiddleware({ app }) } diff --git a/utils/build/docker/nodejs/express/integrations/messaging/aws/kinesis.js b/utils/build/docker/nodejs/express/integrations/messaging/aws/kinesis.js index 388b3046b2..384b8a4b9b 100644 --- a/utils/build/docker/nodejs/express/integrations/messaging/aws/kinesis.js +++ 
b/utils/build/docker/nodejs/express/integrations/messaging/aws/kinesis.js @@ -1,10 +1,13 @@ const AWS = require('aws-sdk') const tracer = require('dd-trace') +const { AWS_HOST } = require('./shared') + const kinesisProduce = (stream, message, partitionKey = '1', timeout = 60000) => { // Create a Kinesis client const kinesis = new AWS.Kinesis({ - region: 'us-east-1' + region: 'us-east-1', + endpoint: AWS_HOST }) message = JSON.stringify({ message }) @@ -66,7 +69,10 @@ const kinesisProduce = (stream, message, partitionKey = '1', timeout = 60000) => const kinesisConsume = (stream, timeout = 60000, message, sequenceNumber) => { // Create a Kinesis client - const kinesis = new AWS.Kinesis() + const kinesis = new AWS.Kinesis({ + region: 'us-east-1', + endpoint: AWS_HOST + }) console.log(`[Kinesis] Looking for the following message for stream: ${stream}: ${message}`) diff --git a/utils/build/docker/nodejs/express/integrations/messaging/aws/shared.js b/utils/build/docker/nodejs/express/integrations/messaging/aws/shared.js new file mode 100644 index 0000000000..c9c25199c0 --- /dev/null +++ b/utils/build/docker/nodejs/express/integrations/messaging/aws/shared.js @@ -0,0 +1,4 @@ +const AWS_HOST = process.env.SYSTEM_TESTS_AWS_URL ?? 'https://sns.us-east-1.amazonaws.com' +const AWS_ACCT = process.env.SYSTEM_TESTS_AWS_URL ? '000000000000' : '601427279990' + +module.exports = { AWS_HOST, AWS_ACCT } diff --git a/utils/build/docker/nodejs/express/integrations/messaging/aws/sns.js b/utils/build/docker/nodejs/express/integrations/messaging/aws/sns.js index 14988d5197..2baf09462c 100644 --- a/utils/build/docker/nodejs/express/integrations/messaging/aws/sns.js +++ b/utils/build/docker/nodejs/express/integrations/messaging/aws/sns.js @@ -1,13 +1,21 @@ const AWS = require('aws-sdk') const tracer = require('dd-trace') +const { AWS_HOST, AWS_ACCT } = require('./shared') + let TopicArn let QueueUrl const snsPublish = (queue, topic, message) => { // Create an SQS client - const sns = new AWS.SNS() - const sqs = new AWS.SQS() + const sns = new AWS.SNS({ + region: 'us-east-1', + endpoint: AWS_HOST + }) + const sqs = new AWS.SQS({ + region: 'us-east-1', + endpoint: AWS_HOST + }) const messageToSend = message ?? 
'Hello from SNS JavaScript injection' @@ -20,13 +28,15 @@ const snsPublish = (queue, topic, message) => { TopicArn = data.TopicArn - sqs.createQueue({ QueueName: queue }, (err) => { + sqs.createQueue({ QueueName: queue }, (err, data) => { if (err) { console.log(err) reject(err) } - QueueUrl = `https://sqs.us-east-1.amazonaws.com/601427279990/${queue}` + console.log(data) + + QueueUrl = `${AWS_HOST}/${AWS_ACCT}/${queue}` sqs.getQueueAttributes({ QueueUrl, AttributeNames: ['All'] }, (err, data) => { if (err) { @@ -34,6 +44,8 @@ const snsPublish = (queue, topic, message) => { reject(err) } + console.log('sns data') + console.log(data) const QueueArn = data.Attributes.QueueArn const policy = { @@ -102,9 +114,12 @@ const snsPublish = (queue, topic, message) => { const snsConsume = async (queue, timeout, expectedMessage) => { // Create an SQS client - const sqs = new AWS.SQS() + const sqs = new AWS.SQS({ + region: 'us-east-1', + endpoint: AWS_HOST + }) - const queueUrl = `https://sqs.us-east-1.amazonaws.com/601427279990/${queue}` + const queueUrl = `${AWS_HOST}/${AWS_ACCT}/${queue}` return new Promise((resolve, reject) => { let messageFound = false diff --git a/utils/build/docker/nodejs/express/integrations/messaging/aws/sqs.js b/utils/build/docker/nodejs/express/integrations/messaging/aws/sqs.js index bcfc38ccc4..264d0c687a 100644 --- a/utils/build/docker/nodejs/express/integrations/messaging/aws/sqs.js +++ b/utils/build/docker/nodejs/express/integrations/messaging/aws/sqs.js @@ -1,9 +1,14 @@ const AWS = require('aws-sdk') const tracer = require('dd-trace') +const { AWS_HOST, AWS_ACCT } = require('./shared') + const sqsProduce = (queue, message) => { // Create an SQS client - const sqs = new AWS.SQS() + const sqs = new AWS.SQS({ + region: 'us-east-1', + endpoint: AWS_HOST + }) const messageToSend = message ?? 
'Hello from SQS JavaScript injection' @@ -18,7 +23,7 @@ const sqsProduce = (queue, message) => { // Send messages to the queue const produce = () => { sqs.sendMessage({ - QueueUrl: `https://sqs.us-east-1.amazonaws.com/601427279990/${queue}`, + QueueUrl: `${AWS_HOST}/${AWS_ACCT}/${queue}`, MessageBody: messageToSend }, (err, data) => { if (err) { @@ -41,10 +46,15 @@ const sqsProduce = (queue, message) => { const sqsConsume = async (queue, timeout, expectedMessage) => { // Create an SQS client - const sqs = new AWS.SQS() + const sqs = new AWS.SQS({ + region: 'us-east-1', + endpoint: AWS_HOST + }) + + const queueUrl = `${AWS_HOST}/${AWS_ACCT}/${queue}` - const queueUrl = `https://sqs.us-east-1.amazonaws.com/601427279990/${queue}` console.log(`[SQS] Looking for message: ${expectedMessage} in queue: ${queue}`) + return new Promise((resolve, reject) => { let messageFound = false diff --git a/utils/build/docker/php/common/rasp/multiple.php b/utils/build/docker/php/common/rasp/multiple.php new file mode 100644 index 0000000000..a258366cee --- /dev/null +++ b/utils/build/docker/php/common/rasp/multiple.php @@ -0,0 +1,6 @@ +SQS] Created SNS Topic: {topic} and SQS Queue: {queue}") + logging.info(f"[SNS->SQS] Created SNS Topic: {topic} and SQS Queue: {queue}") except Exception as e: - print(f"[SNS->SQS] Error during Python SNS create topic or SQS create queue: {str(e)}") + logging.error(f"[SNS->SQS] Error during Python SNS create topic or SQS create queue: {str(e)}") try: # Send the message to the SNS topic sns.publish(TopicArn=topic_arn, Message=message) - print("[SNS->SQS] Python SNS messaged published successfully") + logging.info("[SNS->SQS] Python SNS messaged published successfully") return "SNS Produce ok" except Exception as e: - print(f"[SNS->SQS] Error during Python SNS publish message: {str(e)}") + logging.error(f"[SNS->SQS] Error during Python SNS publish message: {str(e)}") return {"error": f"[SNS->SQS] Error during Python SNS publish message: {str(e)}"} @@ -60,18 +66,34 @@ def sns_consume(queue, expectedMessage, timeout=60): """ # Create an SQS client - sqs = boto3.client("sqs", region_name="us-east-1") + sqs = boto3.client("sqs", region_name="us-east-1", endpoint_url=SQS_HOST) + + start = time.time() + queue_found = False + queue_url = None + + while not queue_found and time.time() < start + timeout: + try: + data = sqs.get_queue_url(QueueName=queue) + queue_found = True + logging.info(f"Found SQS Queue details with name: {queue}") + logging.info(data) + logging.info(data.get("QueueUrl")) + queue_url = data.get("QueueUrl") + except Exception as e: + logging.info(f"Error during Python SQS get queue details: {str(e)}") + time.sleep(1) consumed_message = None start_time = time.time() while not consumed_message and time.time() - start_time < timeout: try: - response = sqs.receive_message(QueueUrl=f"https://sqs.us-east-1.amazonaws.com/601427279990/{queue}") + response = sqs.receive_message(QueueUrl=queue_url) if response and "Messages" in response: for message in response["Messages"]: - print("[SNS->SQS] Consumed: ") - print(message) + logging.info("[SNS->SQS] Consumed: ") + logging.info(message) if message["Body"] == expectedMessage: consumed_message = message["Body"] logging.info("[SNS->SQS] Success. 
Found the following message: " + consumed_message) @@ -79,15 +101,15 @@ def sns_consume(queue, expectedMessage, timeout=60): else: # entire message may be json within the body try: - print("[SNS->SQS] Trying to decode raw message: ") - print(message.get("Body", "")) + logging.info("[SNS->SQS] Trying to decode raw message: ") + logging.info(message.get("Body", "")) message_json = json.loads(message["Body"]) if message_json.get("Message", "") == expectedMessage: consumed_message = message_json["Message"] - print("[SNS->SQS] Success. Found the following message: " + consumed_message) + logging.info("[SNS->SQS] Success. Found the following message: " + consumed_message) break except Exception as e: - print(e) + logging.error(e) pass except Exception as e: diff --git a/utils/build/docker/python/flask/integrations/messaging/aws/sqs.py b/utils/build/docker/python/flask/integrations/messaging/aws/sqs.py index d8806ad7f0..2728fb1edd 100644 --- a/utils/build/docker/python/flask/integrations/messaging/aws/sqs.py +++ b/utils/build/docker/python/flask/integrations/messaging/aws/sqs.py @@ -1,26 +1,35 @@ import logging +import os import time import boto3 +HOST = os.getenv("SYSTEM_TESTS_AWS_URL", "https://sqs.us-east-1.amazonaws.com/601427279990") +AWS_ACCT = "000000000000" if "localstack" in HOST else "601427279990" + + def sqs_produce(queue, message, timeout=60): """ The goal of this function is to trigger sqs producer calls """ # Create an SQS client - sqs = boto3.client("sqs", region_name="us-east-1") + sqs = boto3.client("sqs", region_name="us-east-1", endpoint_url=HOST) start = time.time() queue_created = False exc = None + queue_url = None while not queue_created and time.time() < start + timeout: try: - sqs.create_queue(QueueName=queue) + data = sqs.create_queue(QueueName=queue) queue_created = True logging.info(f"Created SQS Queue with name: {queue}") + logging.info(data) + logging.info(data.get("QueueUrl")) + queue_url = data.get("QueueUrl") except Exception as e: exc = e logging.info(f"Error during Python SQS create queue: {str(e)}") @@ -30,7 +39,7 @@ def sqs_produce(queue, message, timeout=60): while not message_sent and time.time() < start + timeout: try: # Send the message to the SQS queue - sqs.send_message(QueueUrl=f"https://sqs.us-east-1.amazonaws.com/601427279990/{queue}", MessageBody=message) + sqs.send_message(QueueUrl=queue_url, MessageBody=message) message_sent = True except Exception as e: exc = e @@ -49,14 +58,30 @@ def sqs_consume(queue, expectedMessage, timeout=60): The goal of this function is to trigger sqs consumer calls """ # Create an SQS client - sqs = boto3.client("sqs", region_name="us-east-1") + sqs = boto3.client("sqs", region_name="us-east-1", endpoint_url=HOST) + + start = time.time() + queue_found = False + queue_url = None + + while not queue_found and time.time() < start + timeout: + try: + data = sqs.get_queue_url(QueueName=queue) + queue_found = True + logging.info(f"Found SQS Queue details with name: {queue}") + logging.info(data) + logging.info(data.get("QueueUrl")) + queue_url = data.get("QueueUrl") + except Exception as e: + logging.info(f"Error during Python SQS get queue details: {str(e)}") + time.sleep(1) consumed_message = None start_time = time.time() while not consumed_message and time.time() - start_time < timeout: try: - response = sqs.receive_message(QueueUrl=f"https://sqs.us-east-1.amazonaws.com/601427279990/{queue}") + response = sqs.receive_message(QueueUrl=queue_url) if response and "Messages" in response: for message in response["Messages"]: if 
message["Body"] == expectedMessage: diff --git a/utils/build/docker/python/flask/integrations/messaging/rabbitmq.py b/utils/build/docker/python/flask/integrations/messaging/rabbitmq.py index 6a2c45815b..8fe8712173 100644 --- a/utils/build/docker/python/flask/integrations/messaging/rabbitmq.py +++ b/utils/build/docker/python/flask/integrations/messaging/rabbitmq.py @@ -1,13 +1,10 @@ import kombu -from ddtrace.trace import tracer, Pin - def rabbitmq_produce(queue, exchange, routing_key, message): conn = kombu.Connection("amqp://rabbitmq:5672") conn.connect() producer = conn.Producer() - Pin.override(producer, tracer=tracer) task_queue = kombu.Queue(queue, kombu.Exchange(exchange), routing_key=routing_key) to_publish = {"message": message} @@ -24,8 +21,7 @@ def process_message(body, message): message.ack() messages.append(message.payload) - with kombu.Consumer(conn, [task_queue], accept=["json"], callbacks=[process_message]) as consumer: - Pin.override(consumer, tracer=tracer) + with kombu.Consumer(conn, [task_queue], accept=["json"], callbacks=[process_message]): conn.drain_events(timeout=timeout) conn.close() diff --git a/utils/build/docker/python/parametric/apm_test_client/__main__.py b/utils/build/docker/python/parametric/apm_test_client/__main__.py index 7019216af7..9c78df5de2 100644 --- a/utils/build/docker/python/parametric/apm_test_client/__main__.py +++ b/utils/build/docker/python/parametric/apm_test_client/__main__.py @@ -4,5 +4,8 @@ uvicorn.run( - "apm_test_client.server:app", host="0.0.0.0", port=int(os.getenv("APM_TEST_CLIENT_SERVER_PORT")), log_level="debug" + "apm_test_client.server:app", + host="0.0.0.0", + port=int(os.getenv("APM_TEST_CLIENT_SERVER_PORT", "80")), + log_level="debug", ) diff --git a/utils/build/docker/python/parametric/apm_test_client/server.py b/utils/build/docker/python/parametric/apm_test_client/server.py index d79d8ec27d..310fd79492 100644 --- a/utils/build/docker/python/parametric/apm_test_client/server.py +++ b/utils/build/docker/python/parametric/apm_test_client/server.py @@ -27,6 +27,7 @@ import ddtrace from ddtrace.trace import Span +from ddtrace._trace.sampling_rule import SamplingRule from ddtrace import config from ddtrace.contrib.trace_utils import set_http_meta from ddtrace.trace import Context @@ -110,7 +111,7 @@ def trace_config() -> TraceConfigReturn: config={ "dd_service": config.service, "dd_log_level": None, - "dd_trace_sample_rate": str(config._trace_sample_rate), + "dd_trace_sample_rate": str(_global_sampling_rate()), "dd_trace_enabled": str(config._tracing_enabled).lower(), "dd_runtime_metrics_enabled": str(config._runtime_metrics_enabled).lower(), "dd_tags": ",".join(f"{k}:{v}" for k, v in config.tags.items()), @@ -678,6 +679,19 @@ def get_ddtrace_version() -> Tuple[int, int, int]: return parse_version(getattr(ddtrace, "__version__", "")) +def _global_sampling_rate(): + for rule in ddtrace.tracer._sampler.rules: + if ( + rule.service == SamplingRule.NO_RULE + and rule.name == SamplingRule.NO_RULE + and rule.resource == SamplingRule.NO_RULE + and rule.tags == SamplingRule.NO_RULE + and rule.provenance == "default" + ): + return rule.sample_rate + return 1.0 + + # TODO: Remove all unused otel types and endpoints from parametric tests # Defined in apm_test_client.proto but not implemented in library clients (_library_client.py) # class OtelFlushTraceStatsArgs(BaseModel): diff --git a/utils/build/docker/python/python3.12.Dockerfile b/utils/build/docker/python/python3.12.Dockerfile index 6d3929e0f6..7f35b164ba 100644 --- 
a/utils/build/docker/python/python3.12.Dockerfile +++ b/utils/build/docker/python/python3.12.Dockerfile @@ -1,4 +1,4 @@ -FROM datadog/system-tests:python3.12.base-v5 +FROM datadog/system-tests:python3.12.base-v6 WORKDIR /app diff --git a/utils/build/docker/python/python3.12.base.Dockerfile b/utils/build/docker/python/python3.12.base.Dockerfile index 3948989775..b11eab8412 100644 --- a/utils/build/docker/python/python3.12.base.Dockerfile +++ b/utils/build/docker/python/python3.12.base.Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.12.1-slim +FROM python:3.12-slim # install bin dependancies RUN apt-get update && apt-get install -y curl git gcc g++ make cmake @@ -15,6 +15,6 @@ RUN pip install django pycryptodome gunicorn gevent requests boto3==1.34.141 'mo RUN curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y ENV PATH="/root/.cargo/bin:$PATH" -# docker build --progress=plain -f utils/build/docker/python/python3.12.base.Dockerfile -t datadog/system-tests:python3.12.base-v2 . -# docker push datadog/system-tests:python3.12.base-v2 +# docker build --progress=plain -f utils/build/docker/python/python3.12.base.Dockerfile -t datadog/system-tests:python3.12.base-v6 . +# docker push datadog/system-tests:python3.12.base-v6 diff --git a/utils/build/docker/python/uds-flask.Dockerfile b/utils/build/docker/python/uds-flask.Dockerfile index 08fb6d140d..17adce0ab3 100644 --- a/utils/build/docker/python/uds-flask.Dockerfile +++ b/utils/build/docker/python/uds-flask.Dockerfile @@ -1,4 +1,4 @@ -FROM datadog/system-tests:flask-poc.base-v7 +FROM datadog/system-tests:flask-poc.base-v8 WORKDIR /app diff --git a/utils/build/docker/ruby/graphql23/app/graphql/system_test_schema.rb b/utils/build/docker/ruby/graphql23/app/graphql/system_test_schema.rb index 91b2bf5265..1c7a0a3699 100644 --- a/utils/build/docker/ruby/graphql23/app/graphql/system_test_schema.rb +++ b/utils/build/docker/ruby/graphql23/app/graphql/system_test_schema.rb @@ -14,11 +14,12 @@ class SystemTestSchema < GraphQL::Schema rescue_from(RuntimeError) do |err, obj, args, ctx, field| # Custom extension values used for testing. 
raise GraphQL::ExecutionError.new(err.message, extensions: { - 'int-1': 1, - 'str-1': '1', - 'array-1-2': [1,'2'], - '': 'empty string', - ',': 'comma', + int: 1, + float: 1.1, + str: '1', + bool: true, + other: [1, 'foo'], + not_captured: 'nope', }) end diff --git a/utils/interfaces/_core.py b/utils/interfaces/_core.py index ae4e51841a..6c4034b450 100644 --- a/utils/interfaces/_core.py +++ b/utils/interfaces/_core.py @@ -119,15 +119,17 @@ def load_data_from_logs(self): def _append_data(self, data): self._data_list.append(data) - def get_data(self, path_filters=None): + def get_data(self, path_filters: list[str] | str | None = None): if path_filters is not None: if isinstance(path_filters, str): path_filters = [path_filters] - path_filters = [re.compile(path) for path in path_filters] + path_regexes = [re.compile(path) for path in path_filters] + else: + path_regexes = None for data in self._data_list: - if path_filters is not None and all(path.fullmatch(data["path"]) is None for path in path_filters): + if path_regexes is not None and all(path.fullmatch(data["path"]) is None for path in path_regexes): continue yield data @@ -207,23 +209,39 @@ def assert_schema_points(self, excluded_points=None): assert not has_error, f"Schema validation failed for {self.name}" - def assert_request_header(self, path, header_name_pattern: str, header_value_pattern: str) -> None: + def assert_response_header(self, path_filters, header_name_pattern: str, header_value_pattern: str) -> None: + """Assert that a header, and its value are present in all requests for a given path + header_name_pattern: a regular expression to match the header name (lower case) + header_value_pattern: a regular expression to match the header value + """ + + self._assert_header(path_filters, "response", header_name_pattern, header_value_pattern) + + def assert_request_header(self, path_filters, header_name_pattern: str, header_value_pattern: str) -> None: """Assert that a header, and its value are present in all requests for a given path header_name_pattern: a regular expression to match the header name (lower case) header_value_pattern: a regular expression to match the header value """ + self._assert_header(path_filters, "request", header_name_pattern, header_value_pattern) + + def _assert_header( + self, path_filters, request_or_response: str, header_name_pattern: str, header_value_pattern: str + ) -> None: data_found = False - for data in self.get_data(path): + for data in self.get_data(path_filters): data_found = True found = False - for header, value in data["request"]["headers"]: + for header, value in data[request_or_response]["headers"]: if re.fullmatch(header_name_pattern, header.lower()): if not re.fullmatch(header_value_pattern, value): - logger.error(f"Header {header} found in {data['log_filename']}, but value is {value}") + logger.error( + f"{request_or_response} header {header} found in " + f"{data['log_filename']}, but value is {value}" + ) else: found = True continue @@ -232,7 +250,7 @@ def assert_request_header(self, path, header_name_pattern: str, header_value_pat raise ValueError(f"{header_name_pattern} not found (or incorrect) in {data['log_filename']}") if not data_found: - raise ValueError(f"No data found for {path}") + raise ValueError(f"No data found for {path_filters}") class ValidationError(Exception): diff --git a/utils/interfaces/_library/core.py b/utils/interfaces/_library/core.py index b5d39ae329..6fece0d323 100644 --- a/utils/interfaces/_library/core.py +++ b/utils/interfaces/_library/core.py @@ -24,6 
+24,8 @@ class LibraryInterfaceValidator(ProxyBasedInterfaceValidator): """Validate library/agent interface""" + trace_paths = ["/v0.4/traces", "/v0.5/traces"] + def __init__(self, name): super().__init__(name) self.ready = threading.Event() @@ -47,14 +49,12 @@ def wait_function(data): ############################################################ def get_traces(self, request=None): - paths = ["/v0.4/traces", "/v0.5/traces"] - rid = get_rid_from_request(request) if rid: logger.debug(f"Try to find traces related to request {rid}") - for data in self.get_data(path_filters=paths): + for data in self.get_data(path_filters=self.trace_paths): traces = data["request"]["content"] if not traces: # may be none continue @@ -251,7 +251,7 @@ def assert_all_traces_requests_forwarded(self, paths): raise ValueError("Some path has not been transmitted") def assert_trace_id_uniqueness(self): - trace_ids = {} + trace_ids: dict[int, str] = {} for data, trace in self.get_traces(): spans = [span for span in trace if span.get("parent_id") in ("0", 0, None)] diff --git a/utils/interfaces/_logs.py b/utils/interfaces/_logs.py index 905f850443..65c1b52f24 100644 --- a/utils/interfaces/_logs.py +++ b/utils/interfaces/_logs.py @@ -51,7 +51,7 @@ def _read(self): log_count = 0 try: with open(filename, encoding="utf-8") as f: - buffer = [] + buffer: list[str] = [] for raw_line in f: line = raw_line if line.endswith("\n"): diff --git a/utils/interfaces/_test_agent.py b/utils/interfaces/_test_agent.py index d4c8bdd983..f1fcb8e4ae 100644 --- a/utils/interfaces/_test_agent.py +++ b/utils/interfaces/_test_agent.py @@ -88,7 +88,7 @@ def get_telemetry_logs(self): def get_crash_reports(self): logger.debug("Try to find telemetry data related to crash reports") - crash_reports = [] + crash_reports: list = [] for t in self.get_telemetry_logs(): payload = t["payload"] diff --git a/utils/interfaces/schemas/serve_doc.py b/utils/interfaces/schemas/serve_doc.py index de520fcd4b..e6be7891ef 100644 --- a/utils/interfaces/schemas/serve_doc.py +++ b/utils/interfaces/schemas/serve_doc.py @@ -23,7 +23,7 @@ @app.route("/", methods=["GET"]) def default(): - data = {"schemas": []} + data: dict = {"schemas": []} for schema_id, schema in store.items(): # skip some schemas diff --git a/utils/otel_validators/validator_log.py b/utils/otel_validators/validator_log.py index 95e3fb002f..b6ed688e5c 100644 --- a/utils/otel_validators/validator_log.py +++ b/utils/otel_validators/validator_log.py @@ -26,6 +26,7 @@ def validate_log_trace_correlation(otel_log_trace_attrs: dict, trace: dict) -> N span = None for item in trace["spans"].items(): span = item[1] + assert span is not None assert otel_log_trace_attrs["trace_id"] == span["meta"]["otel.trace_id"] assert int(otel_log_trace_attrs["span_id"], 16) == int(span["span_id"]) assert str(otel_log_trace_attrs["severity_number"]) == "9" diff --git a/utils/parametric/_library_client.py b/utils/parametric/_library_client.py index 1689ed55a6..3df25ccb0c 100644 --- a/utils/parametric/_library_client.py +++ b/utils/parametric/_library_client.py @@ -49,6 +49,7 @@ def __init__(self, url: str, timeout: int, container: Container): self._base_url = url self._session = requests.Session() self.container = container + self.timeout = timeout # wait for server to start self._wait(timeout) @@ -70,6 +71,10 @@ def _wait(self, timeout): message = f"Timeout of {timeout} seconds exceeded waiting for HTTP server to start. Please check logs." 
            _fail(message)

+    def container_restart(self):
+        self.container.restart()
+        self._wait(self.timeout)
+
     def is_alive(self) -> bool:
         self.container.reload()
         return (
@@ -96,16 +101,16 @@ def crash(self) -> None:

     def container_exec_run(self, command: str) -> tuple[bool, str]:
         try:
-            code, (stdout, _) = self.container.exec_run(command, demux=True)
+            code, (stdout, stderr) = self.container.exec_run(command, demux=True)
             if code is None:
                 success = False
                 message = "Exit code from command in the parametric app container is None"
-            elif stdout is None:
+            elif stderr is not None or code != 0:
                 success = False
-                message = "Stdout from command in the parametric app container is None"
+                message = f"Error code {code}: {stderr.decode() if stderr is not None else ''}"
             else:
                 success = True
-                message = stdout.decode()
+                message = stdout.decode() if stdout is not None else ""
         except BaseException:
             return False, "Encountered an issue running command in the parametric app container"
@@ -426,6 +431,9 @@ def crash(self) -> None:

     def container_exec_run(self, command: str) -> tuple[bool, str]:
         return self._client.container_exec_run(command)

+    def container_restart(self):
+        self._client.container_restart()
+
     @contextlib.contextmanager
     def dd_start_span(
         self,
diff --git a/utils/proxy/_deserializer.py b/utils/proxy/_deserializer.py
index 2b82012f79..0cd0858cfb 100644
--- a/utils/proxy/_deserializer.py
+++ b/utils/proxy/_deserializer.py
@@ -65,7 +65,7 @@ def _decode_v_0_5_traces(content):
     result = []

     for spans in payload:
-        decoded_spans = []
+        decoded_spans: list = []
         result.append(decoded_spans)
         for span in spans:
             decoded_span = {
diff --git a/utils/proxy/core.py b/utils/proxy/core.py
index e6531d7e79..0947f15e0b 100644
--- a/utils/proxy/core.py
+++ b/utils/proxy/core.py
@@ -23,7 +23,7 @@

 SIMPLE_TYPES = (bool, int, float, type(None))

-messages_counts = defaultdict(int)
+messages_counts: dict[str, int] = defaultdict(int)


 class ObjectDumpEncoder(json.JSONEncoder):
@@ -54,7 +54,7 @@ def __init__(self) -> None:

         # mimic the old API
         self.rc_api_sequential_commands = None
-        self.rc_api_runtime_ids_request_count = None
+        self.rc_api_runtime_ids_request_count: dict = {}

     @staticmethod
     def get_error_response(message) -> http.Response:
diff --git a/utils/proxy/scrubber.py b/utils/proxy/scrubber.py
index c12fd750b6..7290e5ada0 100644
--- a/utils/proxy/scrubber.py
+++ b/utils/proxy/scrubber.py
@@ -12,7 +12,7 @@
 _name_filter = re.compile(r"key|token|secret|pass|docker_login", re.IGNORECASE)


-def _get_secrets() -> list[str]:
+def _get_secrets() -> set[str]:
     secrets: list = [
         value.strip()
         for name, value in os.environ.items()
@@ -21,25 +21,25 @@
     return set(secrets)


-def _instrument_write_methods_str(f, secrets: list[str]) -> None:
+def _instrument_write_methods_str(f, secrets: set[str]) -> None:
     original_write = f.write

     def write(data):
         for secret in secrets:
-            data = data.replace(secret, "")
+            data = data.replace(secret, "--redacted--")

         original_write(data)

     f.write = write


-def _instrument_write_methods_bytes(f, secrets: list[str]) -> None:
+def _instrument_write_methods_bytes(f, secrets: set[str]) -> None:
     original_write = f.write

     def write(data):
         if hasattr(data, "replace"):
             for secret in secrets:
-                data = data.replace(secret.encode(), b"")
+                data = data.replace(secret.encode(), b"--redacted--")

         original_write(data)

@@ -89,10 +89,10 @@ def _instrumented_file_io(file, mode="r", *args, **kwargs):  # noqa: ANN002

 _original_open = builtins.open
-builtins.open = _instrumented_open
+builtins.open = _instrumented_open  # type: ignore[attr-defined]

 _original_pathlib_open = Path.open
-Path.open = _instrumented_path_open
+Path.open = _instrumented_path_open  # type: ignore[assignment]

 _original_file_io = io.FileIO
-io.FileIO = _instrumented_file_io
+io.FileIO = _instrumented_file_io  # type: ignore[misc, assignment]
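The scrubber hunks above switch redaction from deleting secrets outright to writing an explicit --redacted-- marker, which makes scrubbed logs self-describing. A stripped-down sketch of the same write-wrapper pattern, standalone rather than the repo's exact code:

import io

def scrub_writes(f, secrets: set[str]):
    # Wrap f.write so every known secret is replaced before reaching the file.
    original_write = f.write

    def write(data: str):
        for secret in secrets:
            data = data.replace(secret, "--redacted--")
        original_write(data)

    f.write = write
    return f

buf = scrub_writes(io.StringIO(), {"hunter2"})
buf.write("DOCKER_LOGIN=hunter2\n")
assert buf.getvalue() == "DOCKER_LOGIN=--redacted--\n"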
ignore[attr-defined] _original_pathlib_open = Path.open -Path.open = _instrumented_path_open +Path.open = _instrumented_path_open # type: ignore[assignment] _original_file_io = io.FileIO -io.FileIO = _instrumented_file_io +io.FileIO = _instrumented_file_io # type: ignore[misc, assignment] diff --git a/utils/scripts/check_version.sh b/utils/scripts/check_version.sh index cf414f171a..bc51b0696a 100755 --- a/utils/scripts/check_version.sh +++ b/utils/scripts/check_version.sh @@ -7,6 +7,7 @@ weblog_variant="spring-boot" scenario="DEFAULT" build_prefix="dd-java-agent" build_suffix="jar" +test_name="tests/appsec/waf/test_addresses.py::Test_PathParams" # leave empty for all tests readonly OUTPUT_DIR=version-check-output/$library mkdir -p "$OUTPUT_DIR" @@ -103,7 +104,7 @@ for v in "${versions[@]}"; do echo "Building $library $v $weblog_variant" ./build.sh --library $library --weblog-variant $weblog_variant &> /dev/null echo "Running $library $v $weblog_variant" - ./run.sh --scenario $scenario &> "$OUTPUT_DIR/$scenario/$weblog_variant/$library-$v.txt" || true + ./run.sh --scenario $scenario $test_name &> "$OUTPUT_DIR/$scenario/$weblog_variant/$library-$v.txt" || true fi declare -A seen_xpass diff --git a/utils/scripts/compute-workflow-parameters.py b/utils/scripts/compute-workflow-parameters.py index 74d43bddc0..287d0a1018 100644 --- a/utils/scripts/compute-workflow-parameters.py +++ b/utils/scripts/compute-workflow-parameters.py @@ -5,7 +5,7 @@ def get_github_workflow_map(scenarios, scenarios_groups) -> dict: - result = {} + result: dict = {} scenarios_groups = [group.strip() for group in scenarios_groups if group.strip()] scenarios = {scenario.strip(): False for scenario in scenarios if scenario.strip()} @@ -40,8 +40,8 @@ def get_github_workflow_map(scenarios, scenarios_groups) -> dict: return result -def get_graphql_weblogs(library) -> list[str]: - weblogs = { +def get_graphql_weblogs(library: str) -> list[str]: + weblogs: dict[str, list[str]] = { "cpp": [], "dotnet": [], "golang": ["gqlgen", "graph-gophers", "graphql-go"], @@ -100,7 +100,7 @@ def get_endtoend_weblogs(library, ci_environment: str) -> list[str]: def get_opentelemetry_weblogs(library) -> list[str]: - weblogs = { + weblogs: dict[str, list[str]] = { "cpp": [], "dotnet": [], "golang": [], @@ -126,7 +126,7 @@ def _print_output(result: dict[str, dict], output_format: str) -> None: def main( language: str, scenarios: str, groups: str, parametric_job_count: int, ci_environment: str, output_format: str ) -> None: - result = defaultdict(dict) + result: dict = defaultdict(dict) # this data struture is a dict where: # the key is the workflow identifier # the value is also a dict, where the key/value pair is the parameter name/value. 
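The utils/proxy/scrubber.py hunks earlier in this patch swap the empty-string replacement for a visible `--redacted--` marker inside the instrumented write methods. As a rough standalone sketch of that wrap-and-redact pattern — the `_Sink` class, `instrument_write` helper, and sample secret below are illustrative stand-ins, not code from the repository:

    class _Sink:
        """Stand-in for an opened log file: records everything written to it."""

        def __init__(self) -> None:
            self.chunks: list[str] = []

        def write(self, data: str) -> None:
            self.chunks.append(data)


    def instrument_write(f, secrets: set[str]) -> None:
        # Same shape as _instrument_write_methods_str: capture the original
        # write, then shadow it with a version that masks known secrets.
        original_write = f.write

        def write(data: str) -> None:
            for secret in secrets:
                data = data.replace(secret, "--redacted--")
            original_write(data)

        f.write = write


    sink = _Sink()
    instrument_write(sink, {"hunter2"})
    sink.write("DOCKER_LOGIN=hunter2\n")
    print(sink.chunks)  # ['DOCKER_LOGIN=--redacted--\n']

Substituting a marker rather than deleting the secret outright keeps the redaction visible in the captured logs, which is presumably why the hunk changes the replacement text.
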
diff --git a/utils/scripts/compute_impacted_scenario.py b/utils/scripts/compute_impacted_scenario.py index 077aa5e6c4..ccc20fecb8 100644 --- a/utils/scripts/compute_impacted_scenario.py +++ b/utils/scripts/compute_impacted_scenario.py @@ -9,7 +9,7 @@ class Result: def __init__(self) -> None: self.scenarios = {"DEFAULT"} # always run the default scenario - self.scenarios_groups = set() + self.scenarios_groups: set[str] = set() def add_scenario(self, scenario: str) -> None: if scenario == "EndToEndScenario": @@ -20,7 +20,7 @@ def add_scenario(self, scenario: str) -> None: def add_scenario_group(self, scenario_group: str) -> None: self.scenarios_groups.add(scenario_group) - def add_scenarios(self, scenarios: set[str]) -> None: + def add_scenarios(self, scenarios: set[str] | list[str]) -> None: for scenario in scenarios: self.add_scenario(scenario) diff --git a/utils/scripts/extract_appsec_waf_rules.py b/utils/scripts/extract_appsec_waf_rules.py index 5703437b3a..c8fb10a832 100644 --- a/utils/scripts/extract_appsec_waf_rules.py +++ b/utils/scripts/extract_appsec_waf_rules.py @@ -18,7 +18,7 @@ def to_camel_case(str_input) -> str: rules_key = {"1.0": "events", "2.1": "rules", "2.2": "rules"}[version] -result = defaultdict(dict) +result: dict = defaultdict(dict) for event in data[rules_key]: name = event["id"] name = name.replace("-", "_") diff --git a/utils/scripts/get-change-log.py b/utils/scripts/get-change-log.py index c4290fc722..2c764c6e2c 100644 --- a/utils/scripts/get-change-log.py +++ b/utils/scripts/get-change-log.py @@ -8,7 +8,7 @@ for page in range(1, 7): r = requests.get( "https://api.github.com/repos/DataDog/system-tests/pulls", - params={"state": "closed", "per_page": 100, "page": page}, + params={"state": "closed", "per_page": "100", "page": str(page)}, timeout=10, ) diff --git a/utils/scripts/get-image-list.py b/utils/scripts/get-image-list.py index 39f471bdbd..59ea805231 100644 --- a/utils/scripts/get-image-list.py +++ b/utils/scripts/get-image-list.py @@ -7,7 +7,7 @@ from utils._context.containers import _get_client -def main(scenarios: list[str], library: str | None = None, weblog: str | None = None) -> None: +def main(scenarios: list[str], library: str, weblog: str) -> None: images = set("") existing_tags = [] @@ -19,14 +19,12 @@ def main(scenarios: list[str], library: str | None = None, weblog: str | None = images.update(scenario.get_image_list(library, weblog)) # remove images that will be built locally - images = [image for image in images if not image.startswith("system_tests/")] + images = {image for image in images if not image.startswith("system_tests/")} # remove images that exists locally (they may not exists in the registry, ex: buddies) - images = [image for image in images if image not in existing_tags] + images = {image for image in images if image not in existing_tags} - images.sort() - - compose_data = {"services": {re.sub(r"[/:\.]", "-", image): {"image": image} for image in images}} + compose_data = {"services": {re.sub(r"[/:\.]", "-", image): {"image": image} for image in sorted(images)}} print(yaml.dump(compose_data, default_flow_style=False)) diff --git a/utils/scripts/get-nightly-logs.py b/utils/scripts/get-nightly-logs.py index f9984c678f..2a86c27592 100644 --- a/utils/scripts/get-nightly-logs.py +++ b/utils/scripts/get-nightly-logs.py @@ -65,7 +65,7 @@ def get_artifacts(session: requests.Session, repo_slug: str, workflow_file: str, return artifacts -def download_artifact(session: requests.Session, artifact: dict, output_dir: str | None = None) -> None: 
+def download_artifact(session: requests.Session, artifact: dict, output_dir: str) -> None: logging.info("Downloading artifact: %s", artifact["name"]) response = session.get(artifact["archive_download_url"], timeout=60) response.raise_for_status() @@ -77,7 +77,7 @@ def download_artifact(session: requests.Session, artifact: dict, output_dir: str for file in os.listdir(output_dir): if file.endswith(".tar.gz") and Path(os.path.join(output_dir, file)).is_file(): with tarfile.open(os.path.join(output_dir, file), "r:gz") as t: - t.extractall(output_dir, filter=lambda tar_info, _: tar_info) + t.extractall(output_dir, filter=lambda tar_info, _: tar_info) # type: ignore[call-arg] def main( diff --git a/utils/scripts/get-workflow-summary.py b/utils/scripts/get-workflow-summary.py index 1ca05c0471..e6bd831aae 100644 --- a/utils/scripts/get-workflow-summary.py +++ b/utils/scripts/get-workflow-summary.py @@ -63,4 +63,4 @@ def main(repo_slug: str, run_id: int) -> None: if __name__ == "__main__": - main("DataDog/system-tests", sys.argv[1]) + main("DataDog/system-tests", int(sys.argv[1])) diff --git a/utils/scripts/grep-nightly-logs.py b/utils/scripts/grep-nightly-logs.py index 590822cef1..f103b063a6 100644 --- a/utils/scripts/grep-nightly-logs.py +++ b/utils/scripts/grep-nightly-logs.py @@ -12,7 +12,7 @@ logging.getLogger("urllib3").setLevel(logging.WARNING) -def get_environ() -> None: +def get_environ() -> dict[str, str]: environ = {**os.environ} try: @@ -44,7 +44,7 @@ def main( url = f"https://api.github.com/repos/{repo_slug}/actions/workflows/{workflow_file}/runs?" - params = {"per_page": 100} + params: dict[str, str | int] = {"per_page": "100"} if branch: params["branch"] = branch @@ -63,13 +63,14 @@ def main( logging.info(f"Workflow #{workflow['run_number']}-{attempt} {workflow['created_at']} {workflow_url}") jobs_url = f"https://api.github.com/repos/{repo_slug}/actions/runs/{workflow_id}/attempts/{attempt}/jobs" - params = {"per_page": 100, "page": 1} + params = {"per_page": "100"} + page = 1 - jobs = get_json(jobs_url, headers=headers, params=params) + jobs = get_json(jobs_url, headers=headers, params=params | {"page": page}) while len(jobs["jobs"]) < jobs["total_count"]: - params["page"] += 1 - jobs["jobs"] += get_json(jobs_url, headers=headers, params=params)["jobs"] + page += 1 + jobs["jobs"] += get_json(jobs_url, headers=headers, params=params | {"page": page})["jobs"] for job in jobs["jobs"]: job_name = job["name"] diff --git a/utils/scripts/load-binary.sh b/utils/scripts/load-binary.sh index f50db846cf..07555895b0 100755 --- a/utils/scripts/load-binary.sh +++ b/utils/scripts/load-binary.sh @@ -185,7 +185,7 @@ elif [ "$TARGET" = "dotnet" ]; then elif [ "$TARGET" = "python" ]; then assert_version_is_dev - TARGET_BRANCH="${TARGET_BRANCH:-3.x-staging}" + TARGET_BRANCH="${TARGET_BRANCH:-main}" echo "git+https://github.com/DataDog/dd-trace-py.git@$TARGET_BRANCH" > python-load-from-pip echo "Using $(cat python-load-from-pip)" diff --git a/utils/scripts/markdown_logs.py b/utils/scripts/markdown_logs.py index a69d2fb1b3..66619c2699 100644 --- a/utils/scripts/markdown_logs.py +++ b/utils/scripts/markdown_logs.py @@ -4,12 +4,12 @@ from pathlib import Path -def table_row(*args: list[str]) -> None: +def table_row(*args: str) -> None: print(f"| {' | '.join(args)} |") def main() -> None: - result = {} + result: dict[str, dict[str, int]] = {} all_outcomes = {"passed": "✅", "xpassed": "🍇", "skipped": "⏸️", "failed": "❌"} for x in os.listdir("."): diff --git a/utils/tools.py b/utils/tools.py index 
d6fa964cc2..5178e3229d 100644 --- a/utils/tools.py +++ b/utils/tools.py @@ -7,6 +7,7 @@ import os import re import sys +from typing import Any class ShColors(StrEnum): @@ -47,31 +48,32 @@ def update_environ_with_local_env() -> None: DEBUG_LEVEL_STDOUT = 100 -logging.addLevelName(DEBUG_LEVEL_STDOUT, "STDOUT") - -def stdout(self, message, *args, **kws) -> None: # noqa: ANN002 - if self.isEnabledFor(DEBUG_LEVEL_STDOUT): - # Yes, logger takes its '*args' as 'args'. - self._log(DEBUG_LEVEL_STDOUT, message, args, **kws) # pylint: disable=protected-access +class Logger(logging.Logger): + terminal: Any - if hasattr(self, "terminal"): - self.terminal.write_line(message) - self.terminal.flush() - else: - # at this point, the logger may not yet be configured with the pytest terminal - # so directly print in stdout - print(message) # noqa: T201 + def stdout(self, message, *args, **kws) -> None: # noqa: ANN002 + if self.isEnabledFor(DEBUG_LEVEL_STDOUT): + # Yes, logger takes its '*args' as 'args'. + self._log(DEBUG_LEVEL_STDOUT, message, args, **kws) # pylint: disable=protected-access + if hasattr(self, "terminal"): + self.terminal.write_line(message) + self.terminal.flush() + else: + # at this point, the logger may not yet be configured with the pytest terminal + # so directly print in stdout + print(message) # noqa: T201 -logging.Logger.stdout = stdout +logging.setLoggerClass(Logger) +logging.getLogger("requests").setLevel(logging.WARNING) +logging.getLogger("urllib3").setLevel(logging.WARNING) +logging.addLevelName(DEBUG_LEVEL_STDOUT, "STDOUT") -def get_logger(name="tests", *, use_stdout=False) -> logging.Logger: - result = logging.getLogger(name) - logging.getLogger("requests").setLevel(logging.WARNING) - logging.getLogger("urllib3").setLevel(logging.WARNING) +def get_logger(name="tests", *, use_stdout=False) -> Logger: + result: Logger = logging.getLogger(name) # type: ignore[assignment] if use_stdout: stdout_handler = logging.StreamHandler(sys.stdout) @@ -103,7 +105,7 @@ def e(message: str) -> str: logger = get_logger() -def get_rid_from_request(request) -> str: +def get_rid_from_request(request) -> str | None: if request is None: return None @@ -111,7 +113,7 @@ def get_rid_from_request(request) -> str: return user_agent[-36:] -def get_rid_from_span(span) -> str: +def get_rid_from_span(span) -> str | None: if not isinstance(span, dict): logger.error(f"Span should be an object, not {type(span)}") return None @@ -148,7 +150,7 @@ def get_rid_from_span(span) -> str: return get_rid_from_user_agent(user_agent) -def get_rid_from_user_agent(user_agent: str) -> str: +def get_rid_from_user_agent(user_agent: str) -> str | None: if not user_agent: return None diff --git a/utils/virtual_machine/vm_logger.py b/utils/virtual_machine/vm_logger.py index a5ad1efb6a..2214f35872 100644 --- a/utils/virtual_machine/vm_logger.py +++ b/utils/virtual_machine/vm_logger.py @@ -3,11 +3,12 @@ from utils import context -def vm_logger(scenario_name, log_name, level=logging.INFO): +def vm_logger(scenario_name, log_name, level=logging.INFO, log_folder=None): + output_folder = log_folder or context.scenario.host_log_folder specified_logger = logging.getLogger(log_name) if len(specified_logger.handlers) == 0: formatter = logging.Formatter("%(asctime)s:%(message)s", "%Y-%m-%d %H.%M.%S") - handler = logging.FileHandler(f"{context.scenario.host_log_folder}/{log_name}.log") + handler = logging.FileHandler(f"{output_folder}/{log_name}.log") handler.setFormatter(formatter) specified_logger.setLevel(level) 
specified_logger.addHandler(handler)
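
For reference, the utils/tools.py hunk above replaces the monkey-patched `logging.Logger.stdout` function with a proper `Logger` subclass registered through `logging.setLoggerClass`. A minimal sketch of that stdlib pattern, assuming a simplified class — the `StdoutLogger` and `"example"` names are illustrative, and the real class additionally mirrors messages to the pytest terminal when one is attached:

    import logging
    import sys

    DEBUG_LEVEL_STDOUT = 100
    logging.addLevelName(DEBUG_LEVEL_STDOUT, "STDOUT")


    class StdoutLogger(logging.Logger):
        def stdout(self, message, *args, **kws) -> None:
            if self.isEnabledFor(DEBUG_LEVEL_STDOUT):
                # Logger._log takes the varargs tuple as a single 'args' argument.
                self._log(DEBUG_LEVEL_STDOUT, message, args, **kws)


    # setLoggerClass must run before the first getLogger() call for a given
    # name: loggers created earlier keep the class registered at that time.
    logging.setLoggerClass(StdoutLogger)

    logger = logging.getLogger("example")
    logger.addHandler(logging.StreamHandler(sys.stdout))
    logger.stdout("emitted at the custom STDOUT level")

That ordering constraint would also explain why the diff hoists the requests/urllib3 `setLevel` calls out of `get_logger` to module import time, immediately after the logger class is registered.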