chore(tracer): make ddtrace.Tracer a singleton #54906

Workflow file for this run

name: System Tests
on:
  push:
    branches:
      - main
      - 'mq-working-branch**'
  pull_request:
  workflow_dispatch: {}
  schedule:
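    # 04:00 UTC, Tuesday through Saturday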
    - cron: '00 04 * * 2-6'
jobs:
  system-tests-build-agent:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout system tests
        uses: actions/checkout@v4
        with:
          persist-credentials: false
          repository: 'DataDog/system-tests'
      - name: Build agent
        run: ./build.sh -i agent
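      # Export the built image as an artifact so the test jobs can reuse it without rebuilding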
      - name: Save
        id: save
        run: |
          docker image save system_tests/agent:latest | gzip > agent_${{ github.sha }}.tar.gz
      - uses: actions/upload-artifact@v4
        with:
          name: agent_${{ github.sha }}
          path: |
            agent_${{ github.sha }}.tar.gz
          retention-days: 2
  system-tests-build-weblog:
    runs-on: ubuntu-latest
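    # One weblog image is built per variant, using the dd-trace-py checkout placed in binaries/ below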
    strategy:
      matrix:
        include:
          - weblog-variant: flask-poc
          - weblog-variant: uwsgi-poc
          - weblog-variant: django-poc
          - weblog-variant: fastapi
          # runs django-poc for 3.12
          - weblog-variant: python3.12
      fail-fast: false
    env:
      TEST_LIBRARY: python
      WEBLOG_VARIANT: ${{ matrix.weblog-variant }}
      # system-tests requires an API_KEY, but it does not have to be a valid key, as long as we don't run a scenario
      # that makes assertions on backend data. Using a fake key allows system tests to run on PRs originating from forks.
      # If it's ever needed, a valid key exists in the repo as ${{ secrets.DD_API_KEY }}
      DD_API_KEY: ${{ secrets.FAKE_DD_API_KEY }}
      CMAKE_BUILD_PARALLEL_LEVEL: 12
      SYSTEM_TESTS_AWS_ACCESS_KEY_ID: ${{ secrets.IDM_AWS_ACCESS_KEY_ID }}
      SYSTEM_TESTS_AWS_SECRET_ACCESS_KEY: ${{ secrets.IDM_AWS_SECRET_ACCESS_KEY }}
    steps:
      - name: Checkout system tests
        uses: actions/checkout@v4
        with:
          persist-credentials: false
          repository: 'DataDog/system-tests'
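      # Check out this branch's dd-trace-py into binaries/ so the weblog build picks it up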
      - name: Checkout dd-trace-py
        uses: actions/checkout@v4
        with:
          persist-credentials: false
          path: 'binaries/dd-trace-py'
          fetch-depth: 0
          # NB this ref is necessary to keep the checkout out of detached HEAD state, which setuptools_scm requires for
          # proper version guessing
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
      - name: Build
        run: ./build.sh -i weblog
      - name: Save
        id: save
        run: |
          docker image save system_tests/weblog:latest | gzip > ${{ matrix.weblog-variant }}_weblog_${{ github.sha }}.tar.gz
      - uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.weblog-variant }}_${{ github.sha }}
          path: |
            ${{ matrix.weblog-variant }}_weblog_${{ github.sha }}.tar.gz
          retention-days: 2
  system-tests:
    runs-on: ubuntu-latest
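    # One runner per (weblog variant, scenario group) combination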
    needs: [system-tests-build-agent, system-tests-build-weblog]
    strategy:
      matrix:
        weblog-variant: [flask-poc, uwsgi-poc, django-poc, fastapi, python3.12]
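        # Scenario groups; the if: conditions on the run steps below map each ./run.sh scenario to one group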
        scenario: [remote-config, appsec, appsec-1, other, debugger-1, debugger-2]
      fail-fast: false
    env:
      TEST_LIBRARY: python
      WEBLOG_VARIANT: ${{ matrix.weblog-variant }}
      # system-tests requires an API_KEY, but it does not have to be a valid key, as long as we don't run a scenario
      # that makes assertions on backend data. Using a fake key allows system tests to run on PRs originating from forks.
      # If it's ever needed, a valid key exists in the repo as ${{ secrets.DD_API_KEY }}
      DD_API_KEY: ${{ secrets.FAKE_DD_API_KEY }}
      CMAKE_BUILD_PARALLEL_LEVEL: 12
      SYSTEM_TESTS_AWS_ACCESS_KEY_ID: ${{ secrets.IDM_AWS_ACCESS_KEY_ID }}
      SYSTEM_TESTS_AWS_SECRET_ACCESS_KEY: ${{ secrets.IDM_AWS_SECRET_ACCESS_KEY }}
    steps:
      - name: Checkout system tests
        uses: actions/checkout@v4
        with:
          persist-credentials: false
          repository: 'DataDog/system-tests'
      - name: Build runner
        uses: ./.github/actions/install_runner
      - uses: actions/download-artifact@v4
        with:
          name: ${{ matrix.weblog-variant }}_${{ github.sha }}
          path: images_artifacts/
      - uses: actions/download-artifact@v4
        with:
          name: agent_${{ github.sha }}
          path: images_artifacts/
      - name: docker load
        id: docker_load
        run: |
          docker load < images_artifacts/${{ matrix.weblog-variant }}_weblog_${{ github.sha }}.tar.gz
          docker load < images_artifacts/agent_${{ github.sha }}.tar.gz
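      # always() lets every scenario in the group run even if an earlier one failed; the docker_load check still skips them all when the images could not be loaded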
      - name: Run DEFAULT
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'other'
        run: ./run.sh DEFAULT
      - name: Run SAMPLING
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'other'
        run: ./run.sh SAMPLING
      - name: Run INTEGRATIONS
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'other'
        run: ./run.sh INTEGRATIONS
      - name: Run CROSSED_TRACING_LIBRARIES
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'other'
        run: ./run.sh CROSSED_TRACING_LIBRARIES
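      # PROFILING needs relaxed perf event restrictions, so kernel.perf_event_paranoid is lowered first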
      - name: Run PROFILING
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'other'
        run: |
          cat /proc/sys/kernel/perf_event_paranoid
          sudo sysctl kernel.perf_event_paranoid=1
          sudo sysctl -p
          ./run.sh PROFILING
      - name: Run REMOTE_CONFIG_MOCKED_BACKEND_ASM_FEATURES
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'remote-config'
        run: ./run.sh REMOTE_CONFIG_MOCKED_BACKEND_ASM_FEATURES
      - name: Run REMOTE_CONFIG_MOCKED_BACKEND_LIVE_DEBUGGING
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'remote-config'
        run: ./run.sh REMOTE_CONFIG_MOCKED_BACKEND_LIVE_DEBUGGING
      - name: Run REMOTE_CONFIG_MOCKED_BACKEND_ASM_DD
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'remote-config'
        run: ./run.sh REMOTE_CONFIG_MOCKED_BACKEND_ASM_DD
      - name: Run APPSEC_MISSING_RULES
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec'
        run: ./run.sh APPSEC_MISSING_RULES
      - name: Run APPSEC_AUTO_EVENTS_EXTENDED
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec'
        run: ./run.sh APPSEC_AUTO_EVENTS_EXTENDED
      - name: Run APPSEC_CUSTOM_RULES
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec'
        run: ./run.sh APPSEC_CUSTOM_RULES
      - name: Run APPSEC_CORRUPTED_RULES
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec'
        run: ./run.sh APPSEC_CORRUPTED_RULES
      - name: Run APPSEC_RULES_MONITORING_WITH_ERRORS
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec'
        run: ./run.sh APPSEC_RULES_MONITORING_WITH_ERRORS
      - name: Run APPSEC_LOW_WAF_TIMEOUT
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec'
        run: ./run.sh APPSEC_LOW_WAF_TIMEOUT
      - name: Run APPSEC_CUSTOM_OBFUSCATION
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec'
        run: ./run.sh APPSEC_CUSTOM_OBFUSCATION
      - name: Run APPSEC_RATE_LIMITER
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec'
        run: ./run.sh APPSEC_RATE_LIMITER
      - name: Run APPSEC_STANDALONE
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1'
        run: ./run.sh APPSEC_STANDALONE
      - name: Run IAST_STANDALONE
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1'
        run: ./run.sh IAST_STANDALONE
      - name: Run SCA_STANDALONE
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1'
        run: ./run.sh SCA_STANDALONE
      - name: Run APPSEC_RUNTIME_ACTIVATION
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1'
        run: ./run.sh APPSEC_RUNTIME_ACTIVATION
      - name: Run APPSEC_WAF_TELEMETRY
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1'
        run: ./run.sh APPSEC_WAF_TELEMETRY
      - name: Run APPSEC_DISABLED
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1'
        run: ./run.sh APPSEC_DISABLED
      - name: Run APPSEC_BLOCKING
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1'
        run: ./run.sh APPSEC_BLOCKING
      - name: Run APPSEC_BLOCKING_FULL_DENYLIST
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1'
        run: ./run.sh APPSEC_BLOCKING_FULL_DENYLIST
      - name: Run APPSEC_REQUEST_BLOCKING
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1'
        run: ./run.sh APPSEC_REQUEST_BLOCKING
      - name: Run APPSEC_RASP
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1'
        run: ./run.sh APPSEC_RASP
      - name: Run DEBUGGER_PROBES_STATUS
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'debugger-1'
        run: ./run.sh DEBUGGER_PROBES_STATUS
      - name: Run DEBUGGER_PROBES_SNAPSHOT
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'debugger-1'
        run: ./run.sh DEBUGGER_PROBES_SNAPSHOT
      - name: Run DEBUGGER_PII_REDACTION
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'debugger-1'
        run: ./run.sh DEBUGGER_PII_REDACTION
      - name: Run DEBUGGER_EXPRESSION_LANGUAGE
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'debugger-1'
        run: ./run.sh DEBUGGER_EXPRESSION_LANGUAGE
      - name: Run DEBUGGER_EXCEPTION_REPLAY
        if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'debugger-2'
        run: ./run.sh DEBUGGER_EXCEPTION_REPLAY
      # Compressing first speeds up the artifact upload considerably
      - name: Compress artifact
        if: always() && steps.docker_load.outcome == 'success'
        id: compress-artifact
        run: tar -czvf artifact.tar.gz $(ls | grep logs)
      - name: Upload artifact
        uses: actions/upload-artifact@v4
        if: always() && steps.docker_load.outcome == 'success'
        with:
          name: logs_${{ matrix.weblog-variant }}_${{ matrix.scenario }}
          path: artifact.tar.gz
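  # Runs the PARAMETRIC scenario against this branch's dd-trace-py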
  parametric:
    runs-on:
      group: "APM Larger Runners"
    env:
      TEST_LIBRARY: python
    steps:
      - name: Checkout system tests
        uses: actions/checkout@v4
        with:
          persist-credentials: false
          repository: 'DataDog/system-tests'
      - name: Checkout dd-trace-py
        uses: actions/checkout@v4
        with:
          persist-credentials: false
          path: 'binaries/dd-trace-py'
          fetch-depth: 0
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
      - name: Build runner
        id: build_runner
        uses: ./.github/actions/install_runner
      - name: Run
        if: always() && steps.build_runner.outcome == 'success'
        run: ./run.sh PARAMETRIC
      - name: Compress artifact
        if: always() && steps.build_runner.outcome == 'success'
        run: tar -czvf artifact.tar.gz $(ls | grep logs)
      - name: Upload artifact
        uses: actions/upload-artifact@v4
        if: always() && steps.build_runner.outcome == 'success'
        with:
          name: logs_parametric
          path: artifact.tar.gz
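  # Aggregate job: succeeds only when both parametric and system-tests succeed, so a single check reflects the overall result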
  finished:
    runs-on: ubuntu-latest
    needs: [parametric, system-tests]
    if: success() || failure()
    steps:
      - name: True when everything else succeeded
        if: needs.parametric.result == 'success' && needs.system-tests.result == 'success'
        run: exit 0
      - name: Fails if anything else failed
        if: needs.parametric.result != 'success' || needs.system-tests.result != 'success'
        run: exit 1