diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index ee15cde34..66e9f8aec 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -8,6 +8,7 @@ Fixes # (issue) Please delete options that are not relevant. +- [ ] Dependency update - [ ] Bug fix - [ ] New feature - [ ] Refactor/improvement diff --git a/.github/workflows/agreements.yaml b/.github/workflows/agreements.yaml index 7688f1fd6..afe92b9bc 100644 --- a/.github/workflows/agreements.yaml +++ b/.github/workflows/agreements.yaml @@ -7,7 +7,7 @@ on: jobs: call-workflow-agreements: - uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-agreements.yaml@v1.4.1 + uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-agreements.yaml@v1.4.2 permissions: actions: read contents: read diff --git a/.github/workflows/cd-docs-pdf.yaml b/.github/workflows/cd-docs-pdf.yaml index 9eccd5516..2f836c475 100644 --- a/.github/workflows/cd-docs-pdf.yaml +++ b/.github/workflows/cd-docs-pdf.yaml @@ -41,6 +41,6 @@ jobs: sudo apt-get install -y pandoc texlive librsvg2-bin texlive-latex-extra pandoc -s --pdf-engine=pdflatex -o /tmp/SC4S4SNMP_docs.pdf $(find . 
-type f -name "*.md") -H deeplists.tex - name: Release - uses: softprops/action-gh-release@v1 + uses: softprops/action-gh-release@v2 with: files: /tmp/SC4SNMP_docs.pdf diff --git a/.github/workflows/cd-pages.yaml b/.github/workflows/cd-pages.yaml index b07fe949a..2eba58202 100644 --- a/.github/workflows/cd-pages.yaml +++ b/.github/workflows/cd-pages.yaml @@ -19,7 +19,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: oleksiyrudenko/gha-git-credentials@v2.1.1 + - uses: oleksiyrudenko/gha-git-credentials@v2.1.2 with: token: "${{ secrets.PAT_CLATOOL }}" @@ -29,7 +29,7 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Login to GitHub Packages Docker Registry - uses: docker/login-action@v3.0.0 + uses: docker/login-action@v3.1.0 with: registry: ghcr.io username: ${{ github.actor }} @@ -104,7 +104,7 @@ jobs: helm repo index /tmp/package --url https://github.com/splunk/splunk-connect-for-snmp/releases/download/$VERSION --merge /tmp/origin/index.yaml cp /tmp/package/index.yaml /tmp/index/ - name: Deploy - uses: peaceiris/actions-gh-pages@v3 + uses: peaceiris/actions-gh-pages@v4 with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: /tmp/index diff --git a/.github/workflows/ci-docker-deployment.yaml b/.github/workflows/ci-docker-deployment.yaml new file mode 100644 index 000000000..e2aa045dc --- /dev/null +++ b/.github/workflows/ci-docker-deployment.yaml @@ -0,0 +1,34 @@ +name: create-compose-files +on: + push: + tags: + - "v*" + +jobs: + build: + name: Build + runs-on: ubuntu-latest + permissions: + contents: write + packages: write + strategy: + matrix: + python-version: + - 3.9 + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Zip docker_compose directory and upload it + run: | + zip -r docker_compose.zip docker_compose + VERSION=$(echo $GITHUB_REF | cut -d / -f 3) + gh release upload $VERSION docker_compose.zip + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Create artifact + uses: 
actions/upload-artifact@v4 + with: + name: ${{ env.ARTIFACT_NAME }} + path: docker_compose.zip + env: + ARTIFACT_NAME: docker_compose diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml index df409b653..8ffa278fb 100644 --- a/.github/workflows/ci-main.yaml +++ b/.github/workflows/ci-main.yaml @@ -71,7 +71,7 @@ jobs: steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 - - uses: pre-commit/action@v3.0.0 + - uses: pre-commit/action@v3.0.1 test-unit: name: Test Unit Python ${{ matrix.python-version }} runs-on: ubuntu-latest @@ -92,16 +92,16 @@ jobs: - name: Run Pytest with coverage run: | poetry run pytest --cov=./splunk_connect_for_snmp --cov-report=xml --junitxml=test-results/junit.xml - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - files: ./coverage.xml - directory: ./coverage/reports/ - env_vars: OS,PYTHON - fail_ci_if_error: true - path_to_write_report: ./coverage/codecov_report.txt - verbose: true +# - name: Upload coverage to Codecov +# uses: codecov/codecov-action@v3 +# with: +# token: ${{ secrets.CODECOV_TOKEN }} +# files: ./coverage.xml +# directory: ./coverage/reports/ +# env_vars: OS,PYTHON +# fail_ci_if_error: true +# path_to_write_report: ./coverage/codecov_report.txt +# verbose: true - uses: actions/upload-artifact@v4 # upload test results if: success() || failure() # run this step even if previous step failed with: @@ -140,7 +140,7 @@ jobs: uses: actions/checkout@v4 - name: run install_microk8s.sh run: | - sudo snap install microk8s --classic --channel=1.25/stable + sudo snap install microk8s --classic --channel=1.30/stable sudo apt-get install snmp -y sudo apt-get install python3-dev -y - name: run automatic_setup.sh diff --git a/.github/workflows/ci-release-pr.yaml b/.github/workflows/ci-release-pr.yaml index 269549f2c..b43ad8c06 100644 --- a/.github/workflows/ci-release-pr.yaml +++ b/.github/workflows/ci-release-pr.yaml @@ -42,7 +42,7 @@ 
jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Login to GitHub Packages Docker Registry - uses: docker/login-action@v3.0.0 + uses: docker/login-action@v3.1.0 with: registry: ghcr.io username: ${{ github.actor }} diff --git a/.github/workflows/ci-release.yaml b/.github/workflows/ci-release.yaml index 77316c5ba..96caf2ef7 100644 --- a/.github/workflows/ci-release.yaml +++ b/.github/workflows/ci-release.yaml @@ -43,7 +43,7 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Login to GitHub Packages Docker Registry - uses: docker/login-action@v3.0.0 + uses: docker/login-action@v3.1.0 with: registry: ghcr.io username: ${{ github.actor }} @@ -77,14 +77,14 @@ jobs: node-version: "14" - name: Semantic Release id: version - uses: splunk/semantic-release-action@v1.3.3 + uses: splunk/semantic-release-action@v1.3.4 with: git_committer_name: ${{ secrets.SA_GH_USER_NAME }} git_committer_email: ${{ secrets.SA_GH_USER_EMAIL }} gpg_private_key: ${{ secrets.SA_GPG_PRIVATE_KEY }} passphrase: ${{ secrets.SA_GPG_PASSPHRASE }} extra_plugins: | - @google/semantic-release-replace-plugin + @google/semantic-release-replace-plugin@1.2.7 env: GITHUB_TOKEN: ${{ secrets.GH_TOKEN_ADMIN }} diff --git a/.github/workflows/ci-ui-tests.yaml b/.github/workflows/ci-ui-tests.yaml index 037a3da13..9a37f61d8 100644 --- a/.github/workflows/ci-ui-tests.yaml +++ b/.github/workflows/ci-ui-tests.yaml @@ -71,7 +71,7 @@ jobs: - name: install microk8s run: | - sudo snap install microk8s --classic --channel=1.25/stable + sudo snap install microk8s --classic --channel=1.30/stable sudo apt-get install snmp -y sudo apt-get install python3-dev -y diff --git a/.github/workflows/mike.yaml b/.github/workflows/mike.yaml index 54cdbb74d..b67b6aa46 100644 --- a/.github/workflows/mike.yaml +++ b/.github/workflows/mike.yaml @@ -35,7 +35,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: oleksiyrudenko/gha-git-credentials@v2.1.1 + - uses: 
oleksiyrudenko/gha-git-credentials@v2.1.2 with: token: "${{ secrets.PAT_CLATOOL }}" diff --git a/.github/workflows/release-notes.yaml b/.github/workflows/release-notes.yaml index a39d267b4..46a7d62d2 100644 --- a/.github/workflows/release-notes.yaml +++ b/.github/workflows/release-notes.yaml @@ -10,6 +10,6 @@ jobs: permissions: contents: write packages: write - uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-release-notes.yaml@v1.4.1 + uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-release-notes.yaml@v1.4.2 secrets: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 7f783c156..43d5f3804 100644 --- a/.gitignore +++ b/.gitignore @@ -103,6 +103,7 @@ celerybeat.pid # Environments .env +!docker_compose/.env .venv env/ venv/ diff --git a/.releaserc b/.releaserc index ca6ba01e3..e659fa4b1 100644 --- a/.releaserc +++ b/.releaserc @@ -55,6 +55,34 @@ } ], "countMatches": true + }, + { + "files": ["docker_compose/.env"], + "from": "^SC4SNMP_TAG=.*", + "to": "SC4SNMP_TAG=\"${nextRelease.version}\"", + "results": [ + { + "file": "docker_compose/.env", + "hasChanged": true, + "numMatches": 1, + "numReplacements": 1 + } + ], + "countMatches": true + }, + { + "files": ["docker_compose/.env"], + "from": "^SC4SNMP_VERSION=.*", + "to": "SC4SNMP_VERSION=\"${nextRelease.version}\"", + "results": [ + { + "file": "docker_compose/.env", + "hasChanged": true, + "numMatches": 1, + "numReplacements": 1 + } + ], + "countMatches": true } ] } @@ -66,7 +94,7 @@ [ "@semantic-release/git", { - "assets": ["NOTICE", "charts/splunk-connect-for-snmp/Chart.yaml", "pyproject.toml", "splunk_connect_for_snmp/__init__.py"], + "assets": ["NOTICE", "charts/splunk-connect-for-snmp/Chart.yaml", "pyproject.toml", "splunk_connect_for_snmp/__init__.py", "docker_compose/.env"], "message": "chore(release): ${nextRelease.version}\n\n${nextRelease.notes}", }, ], diff --git a/CHANGELOG.md b/CHANGELOG.md index a96a1ee4a..d3eba78ae 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,13 @@ ## Unreleased +### Changed +- add docker compose deployment +- update default microk8s to 1.30 + +### Fixed +- fixed a bug with configuration from values.yaml not being transferred to the UI while migrating to SC4SNMP-UI + ## [1.10.0] ### Changed diff --git a/charts/splunk-connect-for-snmp/Chart.lock b/charts/splunk-connect-for-snmp/Chart.lock index 364fc0e71..a3aae186e 100644 --- a/charts/splunk-connect-for-snmp/Chart.lock +++ b/charts/splunk-connect-for-snmp/Chart.lock @@ -4,9 +4,9 @@ dependencies: version: 13.18.5 - name: redis repository: https://charts.bitnami.com/bitnami - version: 18.5.0 + version: 19.1.0 - name: mibserver repository: https://pysnmp.github.io/mibs/charts/ - version: 1.15.7 -digest: sha256:692c53672741f1c2f75c021c3f75ae45290195e5efaeffc9a06b5cb5b0d6ac12 -generated: "2023-12-11T13:21:44.273403415Z" + version: 1.15.8 +digest: sha256:25bb231b140ccc7aed166d3a142ccfa81e5026560d5009f8b0dedde9ecb0657a +generated: "2024-04-17T09:12:38.843604607Z" diff --git a/charts/splunk-connect-for-snmp/Chart.yaml b/charts/splunk-connect-for-snmp/Chart.yaml index 0f652f1d0..7c985f679 100644 --- a/charts/splunk-connect-for-snmp/Chart.yaml +++ b/charts/splunk-connect-for-snmp/Chart.yaml @@ -14,19 +14,19 @@ type: application # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.10.0 +version: 1.11.0-beta.6 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.10.0" +appVersion: "1.11.0-beta.6" # dependencies: - name: mongodb version: ~13.18.0 repository: https://charts.bitnami.com/bitnami - name: redis - version: ~18.5.0 + version: ~19.1.0 repository: https://charts.bitnami.com/bitnami - name: mibserver version: ~1.15 diff --git a/charts/splunk-connect-for-snmp/templates/NOTES.txt b/charts/splunk-connect-for-snmp/templates/NOTES.txt index 61e1a99d2..94fd81372 100644 --- a/charts/splunk-connect-for-snmp/templates/NOTES.txt +++ b/charts/splunk-connect-for-snmp/templates/NOTES.txt @@ -1,2 +1,2 @@ -Walk profiles no longer include IF-MIB family by default. -If you've used this functionality before, please update the walk profile with ['IF-MIB'] varBind. \ No newline at end of file +Default walk no longer calls full oid tree, instead it is collecting only 'SNMPv2-MIB'. +If you want to call full oid for the devices, you have to set enableFullWalk flag to true. \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/inventory/job.yaml b/charts/splunk-connect-for-snmp/templates/inventory/job.yaml index 675b2defe..bc8f7dbe7 100644 --- a/charts/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/charts/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -50,6 +50,8 @@ spec: value: {{ .Values.scheduler.tasksExpiryTime | quote }} - name: CONFIG_FROM_MONGO value: {{ quote .Values.UI.enable | default "false" }} + - name: ENABLE_FULL_WALK + value: {{ .Values.poller.enableFullWalk | default "false" | quote }} volumeMounts: - name: config mountPath: "/app/config" diff --git a/charts/splunk-connect-for-snmp/templates/ui/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/ui/_helpers.tpl index b56314c55..8cd2a84bd 100644 --- a/charts/splunk-connect-for-snmp/templates/ui/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/ui/_helpers.tpl @@ -51,6 +51,8 @@ spec: value: {{ .Values.scheduler.logLevel | default "INFO" }} - name: CONFIG_FROM_MONGO value: {{ quote .Values.UI.enable 
| default "false" }} + - name: ENABLE_FULL_WALK + value: {{ .Values.poller.enableFullWalk | default "false" | quote }} volumeMounts: - name: config mountPath: "/app/config" diff --git a/charts/splunk-connect-for-snmp/values.yaml b/charts/splunk-connect-for-snmp/values.yaml index 95ee9a978..1a3023d13 100644 --- a/charts/splunk-connect-for-snmp/values.yaml +++ b/charts/splunk-connect-for-snmp/values.yaml @@ -226,6 +226,9 @@ poller: # https://splunk.github.io/splunk-connect-for-snmp/main/configuration/poller-configuration/#define-usernamesecrets usernameSecrets: [] + # flag to enable polling full walk tree for devices + enableFullWalk: false + # Here is where polling happens. Learn more on how to configure it here: # https://splunk.github.io/splunk-connect-for-snmp/main/configuration/poller-configuration/ diff --git a/create_packages.sh b/create_packages.sh index 54300a38a..76aa17da5 100755 --- a/create_packages.sh +++ b/create_packages.sh @@ -209,7 +209,7 @@ do done pull_ui_images "/tmp/package/$SPLUNK_DIR" -docker save $images_to_pack > /tmp/package/packages/dependencies-images.tar +docker save "$images_to_pack" > /tmp/package/packages/dependencies-images.tar cd ../.. 
tar -czvf packages/splunk-connect-for-snmp-chart.tar splunk-connect-for-snmp @@ -258,15 +258,15 @@ else fi docker pull "$docker_image_pull" -docker save $docker_image_pull > /tmp/package/packages/sim_image.tar +docker save "$docker_image_pull" > /tmp/package/packages/sim_image.tar # Download and package otel charts cd /tmp/package/packages/ || exit LOCATION=$(curl -s https://api.github.com/repos/signalfx/splunk-otel-collector-chart/releases/latest | grep "zipball_url" | awk '{ print $2 }' | sed 's/,$//' | sed 's/"//g' ) -curl -L -o otel-repo.zip $LOCATION +curl -L -o otel-repo.zip "$LOCATION" unzip otel-repo.zip rm otel-repo.zip -OTEL_DIR=$(pwd)"/"$(ls | grep -E "signalfx-splunk.+") +OTEL_DIR=$(find "$(pwd)" -type d -name "signalfx-splunk*") CHART_DIR="$OTEL_DIR/helm-charts/splunk-otel-collector" OTEL_IMAGE_TAG=$(python3 "$python_script" "$CHART_DIR/Chart.yaml" "appVersion") otel_image=quay.io/signalfx/splunk-otel-collector:"$OTEL_IMAGE_TAG" diff --git a/docker_compose/.env b/docker_compose/.env new file mode 100644 index 000000000..44044faff --- /dev/null +++ b/docker_compose/.env @@ -0,0 +1,66 @@ +# Deployment configuration +SC4SNMP_IMAGE=ghcr.io/splunk/splunk-connect-for-snmp/container +SC4SNMP_TAG="1.11.0-beta.6" +SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH= +TRAPS_CONFIG_FILE_ABSOLUTE_PATH= +INVENTORY_FILE_ABSOLUTE_PATH= +COREFILE_ABS_PATH= +COREDNS_ADDRESS=172.28.0.255 +SC4SNMP_VERSION="1.11.0-beta.6" + +# Dependencies images +COREDNS_IMAGE=coredns/coredns +COREDNS_TAG=1.11.1 +MIBSERVER_IMAGE=ghcr.io/pysnmp/mibs/container +MIBSERVER_TAG=latest +REDIS_IMAGE=docker.io/bitnami/redis +REDIS_TAG=7.2.1-debian-11-r0 +MONGO_IMAGE=docker.io/bitnami/mongodb +MONGO_TAG=6.0.9-debian-11-r5 + +# Splunk instance configuration +SPLUNK_HEC_HOST= +SPLUNK_HEC_PROTOCOL=https +SPLUNK_HEC_PORT=8088 +SPLUNK_HEC_TOKEN= +SPLUNK_HEC_INSECURESSL=false +SPLUNK_SOURCETYPE_TRAPS=sc4snmp:traps +SPLUNK_SOURCETYPE_POLLING_EVENTS=sc4snmp:event +SPLUNK_SOURCETYPE_POLLING_METRICS=sc4snmp:metric 
+SPLUNK_HEC_INDEX_EVENTS=netops +SPLUNK_HEC_INDEX_METRICS=netmetrics +SPLUNK_HEC_PATH=/services/collector +SPLUNK_AGGREGATE_TRAPS_EVENTS=false +IGNORE_EMPTY_VARBINDS=false + +# Workers configuration +WALK_RETRY_MAX_INTERVAL=180 +WALK_MAX_RETRIES=5 +METRICS_INDEXING_ENABLED=false +POLL_BASE_PROFILES=true +IGNORE_NOT_INCREASING_OIDS= +WORKER_LOG_LEVEL=INFO +UDP_CONNECTION_TIMEOUT=3 +MAX_OID_TO_PROCESS=70 +WORKER_POLLER_CONCURRENCY=4 +WORKER_SENDER_CONCURRENCY=4 +WORKER_TRAP_CONCURRENCY=4 +PREFETCH_POLLER_COUNT=1 +PREFETCH_SENDER_COUNT=30 +PREFETCH_TRAP_COUNT=30 +RESOLVE_TRAP_ADDRESS=false +MAX_DNS_CACHE_SIZE_TRAPS=500 +TTL_DNS_CACHE_TRAPS=1800 + +# Inventory configuration +INVENTORY_LOG_LEVEL=INFO +CHAIN_OF_TASKS_EXPIRY_TIME=500 + +# Traps configuration +SNMP_V3_SECURITY_ENGINE_ID=80003a8c04 +TRAPS_PORT=162 + +# Scheduler configuration +SCHEDULER_LOG_LEVEL=INFO + +# Secrets diff --git a/docker_compose/Corefile b/docker_compose/Corefile new file mode 100644 index 000000000..7ea43e1b2 --- /dev/null +++ b/docker_compose/Corefile @@ -0,0 +1,7 @@ +.:53 { + log + errors + auto + reload + forward . 
8.8.8.8 +} \ No newline at end of file diff --git a/docker_compose/docker-compose-coredns.yaml b/docker_compose/docker-compose-coredns.yaml new file mode 100644 index 000000000..887991b76 --- /dev/null +++ b/docker_compose/docker-compose-coredns.yaml @@ -0,0 +1,15 @@ +version: '3.8' +services: + coredns: + image: ${COREDNS_IMAGE}:${COREDNS_TAG:-latest} + command: ["-conf", "/Corefile"] + container_name: coredns + restart: on-failure + expose: + - '53' + - '53/udp' + volumes: + - '${COREFILE_ABS_PATH}:/Corefile' + networks: + my_network: + ipv4_address: ${COREDNS_ADDRESS} diff --git a/docker_compose/docker-compose-dependencies.yaml b/docker_compose/docker-compose-dependencies.yaml new file mode 100644 index 000000000..539ac183a --- /dev/null +++ b/docker_compose/docker-compose-dependencies.yaml @@ -0,0 +1,40 @@ +version: '3.8' +services: + snmp-mibserver: + image: ${MIBSERVER_IMAGE}:${MIBSERVER_TAG:-latest} + container_name: snmp-mibserver + environment: + - NGINX_ENTRYPOINT_QUIET_LOGS=${NGINX_ENTRYPOINT_QUIET_LOGS:-1} + volumes: + - snmp-mibserver-tmp:/tmp/ + depends_on: + - coredns + networks: + - my_network + dns: + - ${COREDNS_ADDRESS} + + redis: + image: ${REDIS_IMAGE}:${REDIS_TAG:-latest} + container_name: redis + restart: always + environment: + - ALLOW_EMPTY_PASSWORD=yes + depends_on: + - coredns + networks: + - my_network + dns: + - ${COREDNS_ADDRESS} + mongo: + image: ${MONGO_IMAGE}:${MONGO_TAG:-latest} + container_name: mongo + restart: always + depends_on: + - coredns + networks: + - my_network + dns: + - ${COREDNS_ADDRESS} +volumes: + snmp-mibserver-tmp: null diff --git a/docker_compose/docker-compose-inventory.yaml b/docker_compose/docker-compose-inventory.yaml new file mode 100644 index 000000000..721f3aa3e --- /dev/null +++ b/docker_compose/docker-compose-inventory.yaml @@ -0,0 +1,36 @@ +version: '3.8' +services: + inventory: + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-inventory + command: ["inventory"] + environment: + 
- CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + + # Inventory configuration + - LOG_LEVEL=${INVENTORY_LOG_LEVEL:-INFO} + - CHAIN_OF_TASKS_EXPIRY_TIME=${CHAIN_OF_TASKS_EXPIRY_TIME:-500} + - CONFIG_FROM_MONGO=${CONFIG_FROM_MONGO:-false} + depends_on: + - redis + - mongo + - coredns + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - ${INVENTORY_FILE_ABSOLUTE_PATH}:/app/inventory/inventory.csv + - inventory-pysnmp-cache-volume:/.pysnmp/ + - inventory-tmp:/tmp/ + restart: on-failure + networks: + - my_network + dns: + - ${COREDNS_ADDRESS} +volumes: + inventory-tmp: null + inventory-pysnmp-cache-volume: null diff --git a/docker_compose/docker-compose-network.yaml b/docker_compose/docker-compose-network.yaml new file mode 100644 index 000000000..ce09f5a6a --- /dev/null +++ b/docker_compose/docker-compose-network.yaml @@ -0,0 +1,7 @@ +version: '3.8' +networks: + my_network: + ipam: + driver: default + config: + - subnet: 172.28.0.0/16 \ No newline at end of file diff --git a/docker_compose/docker-compose-scheduler.yaml b/docker_compose/docker-compose-scheduler.yaml new file mode 100644 index 000000000..c62f35ebb --- /dev/null +++ b/docker_compose/docker-compose-scheduler.yaml @@ -0,0 +1,34 @@ +version: '3.8' +services: + scheduler: + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-scheduler + command: ["celery", "beat"] + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + - 
INVENTORY_REFRESH_RATE=${INVENTORY_REFRESH_RATE:-600} + + # Scheduler configuration + - LOG_LEVEL=${SCHEDULER_LOG_LEVEL:-INFO} + depends_on: + - redis + - mongo + - coredns + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - scheduler-pysnmp-cache-volume:/.pysnmp/ + - scheduler-tmp:/tmp/ + restart: on-failure + networks: + - my_network + dns: + - ${COREDNS_ADDRESS} +volumes: + scheduler-tmp: null + scheduler-pysnmp-cache-volume: null \ No newline at end of file diff --git a/docker_compose/docker-compose-secrets.yaml b/docker_compose/docker-compose-secrets.yaml new file mode 100644 index 000000000..c1dae5281 --- /dev/null +++ b/docker_compose/docker-compose-secrets.yaml @@ -0,0 +1,2 @@ +secrets: {} +version: '3.8' diff --git a/docker_compose/docker-compose-traps.yaml b/docker_compose/docker-compose-traps.yaml new file mode 100644 index 000000000..bf8aae6b7 --- /dev/null +++ b/docker_compose/docker-compose-traps.yaml @@ -0,0 +1,46 @@ +services: + traps: + command: + - trap + container_name: sc4snmp-traps + depends_on: + - redis + - mongo + - coredns + dns: + - ${COREDNS_ADDRESS} + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + - LOG_LEVEL=${SCHEDULER_LOG_LEVEL:-INFO} + - INVENTORY_REFRESH_RATE=${INVENTORY_REFRESH_RATE:-600} + - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} + - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} + - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} + - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} + - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} + - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} + - SNMP_V3_SECURITY_ENGINE_ID=${SNMP_V3_SECURITY_ENGINE_ID:-80003a8c04} + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + networks: + - my_network + 
ports: + - mode: host + protocol: udp + published: ${TRAPS_PORT} + target: 2162 + restart: on-failure + secrets: [] + volumes: + - ${TRAPS_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - traps-pysnmp-cache-volume:/.pysnmp/ + - traps-tmp:/tmp/ +version: '3.8' +volumes: + traps-pysnmp-cache-volume: null + traps-tmp: null diff --git a/docker_compose/docker-compose-worker-poller.yaml b/docker_compose/docker-compose-worker-poller.yaml new file mode 100644 index 000000000..6f7b9441a --- /dev/null +++ b/docker_compose/docker-compose-worker-poller.yaml @@ -0,0 +1,58 @@ +services: + worker-poller: + command: + - celery + - worker-poller + container_name: sc4snmp-worker-poller + depends_on: + - redis + - mongo + - coredns + dns: + - ${COREDNS_ADDRESS} + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - SC4SNMP_VERSION=${SC4SNMP_VERSION:-0.0.0} + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} + - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} + - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} + - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} + - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} + - SPLUNK_SOURCETYPE_TRAPS=${SPLUNK_SOURCETYPE_TRAPS:-sc4snmp:traps} + - SPLUNK_SOURCETYPE_POLLING_EVENTS=${SPLUNK_SOURCETYPE_POLLING_EVENTS:-sc4snmp:event} + - SPLUNK_SOURCETYPE_POLLING_METRICS=${SPLUNK_SOURCETYPE_POLLING_METRICS:-sc4snmp:metric} + - SPLUNK_HEC_INDEX_EVENTS=${SPLUNK_HEC_INDEX_EVENTS:-netops} + - SPLUNK_HEC_INDEX_METRICS=${SPLUNK_HEC_INDEX_METRICS:-netmetrics} + - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} + - SPLUNK_AGGREGATE_TRAPS_EVENTS=${SPLUNK_AGGREGATE_TRAPS_EVENTS:-false} + - IGNORE_EMPTY_VARBINDS=${IGNORE_EMPTY_VARBINDS:-false} + - 
WALK_RETRY_MAX_INTERVAL=${WALK_RETRY_MAX_INTERVAL:-180} + - WALK_MAX_RETRIES=${WALK_MAX_RETRIES:-5} + - METRICS_INDEXING_ENABLED=${METRICS_INDEXING_ENABLED:-false} + - POLL_BASE_PROFILES=${POLL_BASE_PROFILES:-true} + - IGNORE_NOT_INCREASING_OIDS=${IGNORE_NOT_INCREASING_OIDS:-} + - LOG_LEVEL=${WORKER_LOG_LEVEL:-INFO} + - UDP_CONNECTION_TIMEOUT=${UDP_CONNECTION_TIMEOUT:-3} + - MAX_OID_TO_PROCESS=${MAX_OID_TO_PROCESS:-70} + - PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60} + - WORKER_CONCURRENCY=${WORKER_POLLER_CONCURRENCY:-2} + - PREFETCH_COUNT=${PREFETCH_POLLER_COUNT:-1} + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + networks: + - my_network + restart: on-failure + secrets: [] + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - worker-poller-pysnmp-cache-volume:/.pysnmp/ + - worker-poller-tmp:/tmp/ +version: '3.8' +volumes: + worker-poller-pysnmp-cache-volume: null + worker-poller-tmp: null diff --git a/docker_compose/docker-compose-worker-sender.yaml b/docker_compose/docker-compose-worker-sender.yaml new file mode 100644 index 000000000..425343a54 --- /dev/null +++ b/docker_compose/docker-compose-worker-sender.yaml @@ -0,0 +1,60 @@ +version: '3.8' +services: + worker-sender: + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-worker-sender + command: ["celery", "worker-sender"] + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - SC4SNMP_VERSION=${SC4SNMP_VERSION:-0.0.0} + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + #- OTEL_METRICS_URL= #If sim enabled + + # Splunk instance configuration + - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} + - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} + - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} + - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} + - 
SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} + - SPLUNK_SOURCETYPE_TRAPS=${SPLUNK_SOURCETYPE_TRAPS:-sc4snmp:traps} + - SPLUNK_SOURCETYPE_POLLING_EVENTS=${SPLUNK_SOURCETYPE_POLLING_EVENTS:-sc4snmp:event} + - SPLUNK_SOURCETYPE_POLLING_METRICS=${SPLUNK_SOURCETYPE_POLLING_METRICS:-sc4snmp:metric} + - SPLUNK_HEC_INDEX_EVENTS=${SPLUNK_HEC_INDEX_EVENTS:-netops} + - SPLUNK_HEC_INDEX_METRICS=${SPLUNK_HEC_INDEX_METRICS:-netmetrics} + - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} + - SPLUNK_AGGREGATE_TRAPS_EVENTS=${SPLUNK_AGGREGATE_TRAPS_EVENTS:-false} + - IGNORE_EMPTY_VARBINDS=${IGNORE_EMPTY_VARBINDS:-false} + + # Workers configuration + - WALK_RETRY_MAX_INTERVAL=${WALK_RETRY_MAX_INTERVAL:-180} + - WALK_MAX_RETRIES=${WALK_MAX_RETRIES:-5} + - METRICS_INDEXING_ENABLED=${METRICS_INDEXING_ENABLED:-false} + - POLL_BASE_PROFILES=${POLL_BASE_PROFILES:-true} + - IGNORE_NOT_INCREASING_OIDS=${IGNORE_NOT_INCREASING_OIDS:-} + - LOG_LEVEL=${WORKER_LOG_LEVEL:-INFO} + - UDP_CONNECTION_TIMEOUT=${UDP_CONNECTION_TIMEOUT:-3} + - MAX_OID_TO_PROCESS=${MAX_OID_TO_PROCESS:-70} + - PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60} + - WORKER_CONCURRENCY=${WORKER_SENDER_CONCURRENCY:-2} + - PREFETCH_COUNT=${PREFETCH_SENDER_COUNT:-1} + depends_on: + - redis + - mongo + - coredns + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - worker-sender-pysnmp-cache-volume:/.pysnmp/ + - worker-sender-tmp:/tmp/ + restart: on-failure + networks: + - my_network + dns: + - ${COREDNS_ADDRESS} +volumes: + worker-sender-tmp: null + worker-sender-pysnmp-cache-volume: null \ No newline at end of file diff --git a/docker_compose/docker-compose-worker-trap.yaml b/docker_compose/docker-compose-worker-trap.yaml new file mode 100644 index 000000000..eddcedb80 --- /dev/null +++ b/docker_compose/docker-compose-worker-trap.yaml @@ -0,0 +1,63 @@ +version: '3.8' +services: + worker-trap: + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-worker-trap + 
command: ["celery", "worker-trap"] + environment: + - CONFIG_PATH=/app/config/config.yaml + - REDIS_URL=redis://redis:6379/1 + - CELERY_BROKER_URL=redis://redis:6379/0 + - MONGO_URI=mongodb://mongo:27017/ + - SC4SNMP_VERSION=${SC4SNMP_VERSION:-0.0.0} + - MIB_SOURCES=http://snmp-mibserver:8000/asn1/@mib@ + - MIB_INDEX=http://snmp-mibserver:8000/index.csv + - MIB_STANDARD=http://snmp-mibserver:8000/standard.txt + #- OTEL_METRICS_URL= #If sim enabled + + # Splunk instance configuration + - SPLUNK_HEC_HOST=${SPLUNK_HEC_HOST} + - SPLUNK_HEC_SCHEME=${SPLUNK_HEC_PROTOCOL:-https} + - SPLUNK_HEC_PORT=${SPLUNK_HEC_PORT} + - SPLUNK_HEC_TOKEN=${SPLUNK_HEC_TOKEN} + - SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false} + - SPLUNK_SOURCETYPE_TRAPS=${SPLUNK_SOURCETYPE_TRAPS:-sc4snmp:traps} + - SPLUNK_SOURCETYPE_POLLING_EVENTS=${SPLUNK_SOURCETYPE_POLLING_EVENTS:-sc4snmp:event} + - SPLUNK_SOURCETYPE_POLLING_METRICS=${SPLUNK_SOURCETYPE_POLLING_METRICS:-sc4snmp:metric} + - SPLUNK_HEC_INDEX_EVENTS=${SPLUNK_HEC_INDEX_EVENTS:-netops} + - SPLUNK_HEC_INDEX_METRICS=${SPLUNK_HEC_INDEX_METRICS:-netmetrics} + - SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector} + - SPLUNK_AGGREGATE_TRAPS_EVENTS=${SPLUNK_AGGREGATE_TRAPS_EVENTS:-false} + - IGNORE_EMPTY_VARBINDS=${IGNORE_EMPTY_VARBINDS:-false} + + # Workers configuration + - WALK_RETRY_MAX_INTERVAL=${WALK_RETRY_MAX_INTERVAL:-180} + - WALK_MAX_RETRIES=${WALK_MAX_RETRIES:-5} + - METRICS_INDEXING_ENABLED=${METRICS_INDEXING_ENABLED:-false} + - POLL_BASE_PROFILES=${POLL_BASE_PROFILES:-true} + - IGNORE_NOT_INCREASING_OIDS=${IGNORE_NOT_INCREASING_OIDS:-} + - LOG_LEVEL=${WORKER_LOG_LEVEL:-INFO} + - UDP_CONNECTION_TIMEOUT=${UDP_CONNECTION_TIMEOUT:-3} + - MAX_OID_TO_PROCESS=${MAX_OID_TO_PROCESS:-70} + - PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60} + - WORKER_CONCURRENCY=${WORKER_TRAP_CONCURRENCY:-2} + - PREFETCH_COUNT=${PREFETCH_TRAP_COUNT:-1} + - RESOLVE_TRAP_ADDRESS=${RESOLVE_TRAP_ADDRESS:-false} + - 
MAX_DNS_CACHE_SIZE_TRAPS=${MAX_DNS_CACHE_SIZE_TRAPS:-500} + - TTL_DNS_CACHE_TRAPS=${TTL_DNS_CACHE_TRAPS:-1800} + depends_on: + - redis + - mongo + - coredns + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml + - worker-trap-pysnmp-cache-volume:/.pysnmp/ + - worker-trap-tmp:/tmp/ + restart: on-failure + networks: + - my_network + dns: + - ${COREDNS_ADDRESS} +volumes: + worker-trap-tmp: null + worker-trap-pysnmp-cache-volume: null \ No newline at end of file diff --git a/docker_compose/manage_secrets.py b/docker_compose/manage_secrets.py new file mode 100644 index 000000000..5e5e0173e --- /dev/null +++ b/docker_compose/manage_secrets.py @@ -0,0 +1,345 @@ +import argparse +import os +from typing import Union + +import yaml + + +def human_bool(flag: Union[str, bool], default: bool = False) -> bool: + if flag is None: + return False + if isinstance(flag, bool): + return flag + if flag.lower() in [ + "true", + "1", + "t", + "y", + "yes", + ]: + return True + elif flag.lower() in [ + "false", + "0", + "f", + "n", + "no", + ]: + return False + else: + return default + + +def remove_variables_from_env(file_path: str, variables_to_remove: list): + """ + Function to remove variables from .env file + @param file_path: path to .env file + @param variables_to_remove: names of variables to remove + """ + try: + with open(file_path) as env_file: + lines = env_file.readlines() + + with open(file_path, "w") as env_file: + for line in lines: + key = line.split("=")[0].strip() + if key not in variables_to_remove: + env_file.write(line) + + print("Variables removed successfully from .env file.") + except Exception as e: + print(f"Error: {e}") + + +def create_secrets( + variables: dict, + path_to_compose_files: str, + secret_name: str, + make_change_in_worker_poller: bool, + make_change_in_traps: bool, +): + """ + Function to create secrets in .env and docker-compose.yaml files + @param variables: dictionary mapping variable names to their values + @param 
path_to_compose_files: absolute path to directory with .env and docker-compose.yaml files + @param secret_name: name of the secret + @param make_change_in_worker_poller: flag indicating whether to add secrets to worker poller service + @param make_change_in_traps: flag indicating whether to add secrets to traps service + """ + for k, v in variables.items(): + if k != "contextEngineId" and not v: + raise ValueError(f"Value {k} is not set") + + # list for storing secrets configuration which should be added to docker-compose-secrets.yaml + new_secrets = [] + # list for storing secrets configuration which should be added to docker-compose-worker-poller.yaml and + # docker-compose-traps.yaml services + new_secrets_in_workers = [] + + for k, v in variables.items(): + if v: + new_secrets.append( + { + "secret_name": f"{secret_name}_{k}", + "secret_config": {"environment": f"{secret_name}_{k}"}, + } + ) + new_secrets_in_workers.append( + { + "source": f"{secret_name}_{k}", + "target": f"/app/secrets/snmpv3/{secret_name}/{k}", + } + ) + + try: + # Load docker-compose-secrets.yaml to a dictionary and update "secrets" section. If the same secret + # has been already configured, stop processing further. + with open( + os.path.join(path_to_compose_files, "docker-compose-secrets.yaml") + ) as file: + secrets_file = yaml.load(file, Loader=yaml.FullLoader) + if secrets_file["secrets"] is None or "secrets" not in secrets_file: + secrets_file["secrets"] = {} + for new_secret in new_secrets: + if new_secret["secret_name"] in secrets_file["secrets"]: + print(f"Secret {secret_name} already configured. New secret not added.") + return + secrets_file["secrets"][new_secret["secret_name"]] = new_secret[ + "secret_config" + ] + secrets_file_ready = True + except: + print("Problem with editing docker-compose-secrets.yaml. 
Secret not added.") + secrets_file_ready = False + + if make_change_in_worker_poller: + # If the secret should be added to worker poller, load docker-compose-worker-poller.yaml to a dictionary and + # update "secrets" section. + try: + with open( + os.path.join(path_to_compose_files, "docker-compose-worker-poller.yaml") + ) as file: + worker_poller_file = yaml.load(file, Loader=yaml.FullLoader) + if "secrets" not in worker_poller_file["services"]["worker-poller"]: + worker_poller_file["services"]["worker-poller"]["secrets"] = [] + worker_poller_file["services"]["worker-poller"]["secrets"].extend( + new_secrets_in_workers + ) + worker_poller_file_ready = True + except: + print( + "Problem with editing docker-compose-worker-poller.yaml. Secret not added." + ) + worker_poller_file_ready = False + else: + worker_poller_file_ready = True + + if make_change_in_traps: + # If the secret should be added to traps, load docker-compose-traps.yaml to a dictionary and + # update "secrets" section. + try: + with open( + os.path.join(path_to_compose_files, "docker-compose-traps.yaml") + ) as file: + traps_file = yaml.load(file, Loader=yaml.FullLoader) + if "secrets" not in traps_file["services"]["traps"]: + traps_file["services"]["traps"]["secrets"] = [] + traps_file["services"]["traps"]["secrets"].extend(new_secrets_in_workers) + traps_file_ready = True + except: + print("Problem with editing docker-compose-traps.yaml. Secret not added.") + traps_file_ready = False + else: + traps_file_ready = True + + if secrets_file_ready and worker_poller_file_ready and traps_file_ready: + # If all three files were loaded into dictionary and updated successfully, + # save the latest configuration to files. 
+ with open( + os.path.join(path_to_compose_files, "docker-compose-secrets.yaml"), "w" + ) as file: + yaml.dump(secrets_file, file, default_flow_style=False) + + with open(os.path.join(path_to_compose_files, ".env"), "a") as file: + for k, v in variables.items(): + if v: + file.write(f"\n{secret_name}_{k}={v}") + + if make_change_in_worker_poller: + with open( + os.path.join( + path_to_compose_files, "docker-compose-worker-poller.yaml" + ), + "w", + ) as file: + yaml.dump(worker_poller_file, file, default_flow_style=False) + + if make_change_in_traps: + with open( + os.path.join(path_to_compose_files, "docker-compose-traps.yaml"), "w" + ) as file: + yaml.dump(traps_file, file, default_flow_style=False) + + +def delete_secrets( + variables: dict, + path_to_compose_files: str, + secret_name: str, + make_change_in_worker_poller: bool, + make_change_in_traps: bool, +): + """ + Function to delete secrets from .env and docker-compose.yaml files + @param variables: dictionary mapping variable names to their values + @param path_to_compose_files: absolute path to directory with .env and docker-compose.yaml files + @param secret_name: name of the secret + @param make_change_in_worker_poller: flag indicating whether to delete secrets from worker poller service + @param make_change_in_traps: flag indicating whether to delete secrets from traps service + """ + secrets = [] + for key in variables.keys(): + secrets.append(f"{secret_name}_{key}") + + # Load docker-compose-secrets.yaml file to a dictionary and delete desired secrets + with open( + os.path.join(path_to_compose_files, "docker-compose-secrets.yaml") + ) as file: + secrets_file = yaml.load(file, Loader=yaml.FullLoader) + for secret in secrets: + if secret in secrets_file["secrets"]: + del secrets_file["secrets"][secret] + + # Save the updated docker-compose-secrets.yaml configuration + with open( + os.path.join(path_to_compose_files, "docker-compose-secrets.yaml"), "w" + ) as file: + yaml.dump(secrets_file, file, 
default_flow_style=False) + + # Delete secrets from .env + try: + # Read lines from .env + with open(os.path.join(path_to_compose_files, ".env")) as env_file: + lines = env_file.readlines() + + with open(os.path.join(path_to_compose_files, ".env"), "w") as env_file: + lines_to_write = [] + # If the environmental variable is NOT one of the secrets destined for deletion, add them to lines_to_write + for line in lines: + key = line.split("=")[0].strip() + if key not in secrets: + lines_to_write.append(line.strip()) + + # Save each line to .env. The last line should be saved without a new line symbol + for i, line in enumerate(lines_to_write): + if i < len(lines_to_write) - 1: + env_file.write(f"{line}\n") + else: + env_file.write(line) + except Exception as e: + print(f"Error: {e}") + + if make_change_in_worker_poller: + # Load docker-compose-worker-poller.yaml to dictionary and filter out secrets destined for deletion + with open( + os.path.join(path_to_compose_files, "docker-compose-worker-poller.yaml") + ) as file: + worker_poller_file = yaml.load(file, Loader=yaml.FullLoader) + worker_poller_file["services"]["worker-poller"]["secrets"] = list( + filter( + lambda el: el["source"] not in secrets, + worker_poller_file["services"]["worker-poller"]["secrets"], + ) + ) + + # Save updated docker-compose-worker-poller.yaml configuration + with open( + os.path.join(path_to_compose_files, "docker-compose-worker-poller.yaml"), + "w", + ) as file: + yaml.dump(worker_poller_file, file, default_flow_style=False) + + if make_change_in_traps: + # Load docker-compose-traps.yaml to dictionary and filter out secrets destined for deletion + with open( + os.path.join(path_to_compose_files, "docker-compose-traps.yaml") + ) as file: + traps_file = yaml.load(file, Loader=yaml.FullLoader) + traps_file["services"]["traps"]["secrets"] = list( + filter( + lambda el: el["source"] not in secrets, + traps_file["services"]["traps"]["secrets"], + ) + ) + + # Save updated docker-compose-traps.yaml 
configuration + with open( + os.path.join(path_to_compose_files, "docker-compose-traps.yaml"), "w" + ) as file: + yaml.dump(traps_file, file, default_flow_style=False) + + +def main(): + parser = argparse.ArgumentParser(description="Manage secrets in docker compose") + parser.add_argument("--delete", default="false", help="If true, delete the secret") + parser.add_argument("--secret_name", help="Secret name") + parser.add_argument("--path_to_compose", help="Path to dockerfiles") + parser.add_argument( + "--worker_poller", default="true", help="Add secret to worker poller" + ) + parser.add_argument("--traps", default="true", help="Add secret to traps") + parser.add_argument("--userName", default="", help="SNMPV3 username") + parser.add_argument("--privProtocol", default="", help="SNMPV3 privProtocol") + parser.add_argument("--privKey", default="", help="SNMPV3 privKey") + parser.add_argument("--authProtocol", default="", help="SNMPV3 authProtocol") + parser.add_argument("--authKey", default="", help="SNMPV3 authKey") + parser.add_argument("--contextEngineId", default="", help="SNMPV3 contextEngineId") + + args = parser.parse_args() + + # Assign inputs from command line to variables + delete_secret = human_bool(args.delete) + secret_name = args.secret_name + path_to_compose_files = args.path_to_compose + make_change_in_worker_poller = human_bool(args.worker_poller) + make_change_in_traps = human_bool(args.traps) + + # variables dictionary maps variables names stored inside a secret to their values + variables = { + "userName": args.userName, + "privProtocol": args.privProtocol, + "privKey": args.privKey, + "authProtocol": args.authProtocol, + "authKey": args.authKey, + "contextEngineId": args.contextEngineId, + } + + if not os.path.exists(path_to_compose_files): + print("Path to compose files doesn't exist") + return + if not secret_name: + print("Secret name not specified") + return + + if not delete_secret: + try: + create_secrets( + variables, + 
path_to_compose_files, + secret_name, + make_change_in_worker_poller, + make_change_in_traps, + ) + except ValueError as e: + print(e) + else: + delete_secrets( + variables, + path_to_compose_files, + secret_name, + make_change_in_worker_poller, + make_change_in_traps, + ) + + +if __name__ == "__main__": + main() diff --git a/docs/bestpractices.md b/docs/bestpractices.md index c5d296b77..1cfc0f762 100644 --- a/docs/bestpractices.md +++ b/docs/bestpractices.md @@ -82,6 +82,8 @@ If you put in only the IP address (for example, `127.0.0.1`), then errors will b See [Configure small walk profile](../configuration/configuring-profiles/#walk-profile) to enable the small walk functionality. +Check if `poller.enableFullWalk` flag is set to `false`. See [poller configuration](../configuration/poller-configuration/#poller-configuration-file). + ### An error of SNMP isWalk=True blocks traffic on the SC4SNMP instance If you see many `An error of SNMP isWalk=True` errors in your logs, that means that there is a connection problem with the hosts you're polling from. @@ -106,29 +108,32 @@ If 64-bit counter is not supported on your device, you can write your own Splunk the maximum integer value and the current state. The same works for values large enough that they don't fit into a 64-bit value. An example for an appropriate Splunk query would be the following: +``` +| streamstats current=f last(ifInOctets) as p_ifInOctets last(ifOutOctets) as p_ifOutOctets by ifAlias +| eval in_delta=(ifInOctets - p_ifInOctets) +| eval out_delta=(ifOutOctets - p_ifOutOctets) +| eval max=pow(2,64) +| eval out = if(out_delta<0,((max+out_delta)*8/(5*60*1000*1000*1000)),(out_delta)*8/(5*60*1000*1000*1000)) +| timechart span=5m avg(in) AS in, avg(out) AS out by ifAlias +``` + +### Polling authentication errors -### Unknown USM user +#### Unknown USM user In case of polling SNMPv3 devices, `Unknown USM user` error suggests wrong username. 
Verify that the kubernetes secret with the correct username has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)). -### Wrong SNMP PDU digest +#### Wrong SNMP PDU digest In case of polling SNMPv3 devices, `Wrong SNMP PDU digest` error suggests wrong authentication key. Verify that the kubernetes secret with the correct authentication key has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)). -### No SNMP response received before timeout +#### No SNMP response received before timeout `No SNMP response received before timeout` error might have several root causes. Some of them are: + - wrong device IP or port - SNMPv2c wrong community string - SNMPv3 wrong privacy key -``` -| streamstats current=f last(ifInOctets) as p_ifInOctets last(ifOutOctets) as p_ifOutOctets by ifAlias -| eval in_delta=(ifInOctets - p_ifInOctets) -| eval out_delta=(ifOutOctets - p_ifOutOctets) -| eval max=pow(2,64) -| eval out = if(out_delta<0,((max+out_delta)*8/(5*60*1000*1000*1000)),(out_delta)*8/(5*60*1000*1000*1000)) -| timechart span=5m avg(in) AS in, avg(out) AS out by ifAlias -``` ### "Field is immutable" error during helm upgrade ``` diff --git a/docs/configuration/configuring-profiles.md b/docs/configuration/configuring-profiles.md index 5a54c61fc..0350609c8 100644 --- a/docs/configuration/configuring-profiles.md +++ b/docs/configuration/configuring-profiles.md @@ -136,6 +136,8 @@ scheduler: Sometimes static profiles have additional functionalities to be used in specific scenarios. #### WALK profile +BETA NOTE: By default the walk without any profile set is polling only `SNMPv2-MIB`. For changing the scope of the walk you can configure new walk profile or use `enableFullWalk` flag. +More about it in [poller configuration](../poller-configuration/#poller-configuration-file). 
If you would like to limit the scope of the walk, you should set one of the profiles in the inventory to point to the profile definition of the `walk` type: ```yaml diff --git a/docs/configuration/poller-configuration.md b/docs/configuration/poller-configuration.md index 0e58897f0..7d55a8cb4 100644 --- a/docs/configuration/poller-configuration.md +++ b/docs/configuration/poller-configuration.md @@ -23,6 +23,7 @@ poller: - sc4snmp-hlab-sha-aes - sc4snmp-hlab-sha-des logLevel: "WARN" + enableFullWalk: false inventory: | address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete 10.202.4.202,,2c,public,,,2000,,, @@ -30,6 +31,9 @@ poller: NOTE: The header's line (`address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete`) is necessary for the correct execution of SC4SNMP. Do not remove it. +### BETA: Enable full oid tree walk +The default walk profile is polling only `SNMPv2-MIB`. If the full oid tree walk is required it can be enabled by changing `enableFullWalk` flag to true. + ### Define log level The log level for poller can be set by changing the value for the key `logLevel`. The allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`. The default value is `WARNING`. diff --git a/docs/configuration/step-by-step-poll.md b/docs/configuration/step-by-step-poll.md index be4dcd65a..726f15be6 100644 --- a/docs/configuration/step-by-step-poll.md +++ b/docs/configuration/step-by-step-poll.md @@ -8,7 +8,7 @@ In the following example, there are 4 hosts you want to poll from: 4. `10.202.4.204:163` To retrieve data from the device efficiently, first determine the specific data needed. Instead of walking through -the entire `1.3.6.1`, limit the walk to poll only the necessary data. Configure the `IF-MIB` family for interfaces and +the entire `1.3.6.1` or polling only `SNMPv2-MIB`, limit or expand the walk to poll the necessary data. 
Configure the `IF-MIB` family for interfaces and the `UCD-SNMP-MIB` for CPU-related statistics. In the `scheduler` section of `values.yaml`, define the target group and establish the polling parameters, known as the profile, to gather the desired data precisely. See the following example: diff --git a/docs/dockercompose/1-install-docker.md b/docs/dockercompose/1-install-docker.md new file mode 100644 index 000000000..7951d433e --- /dev/null +++ b/docs/dockercompose/1-install-docker.md @@ -0,0 +1,5 @@ +# Install Docker + +To install `Docker` in your environment follow steps from the `Install using the apt repository` section from +the Docker [documentation](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). Install the +latest version. \ No newline at end of file diff --git a/docs/dockercompose/2-download-package.md b/docs/dockercompose/2-download-package.md new file mode 100644 index 000000000..7534e822e --- /dev/null +++ b/docs/dockercompose/2-download-package.md @@ -0,0 +1,27 @@ +# Download package with docker compose files + +## Downloading a package +Package with docker compose configuration files (`docker_compose.zip`) can be downloaded from the [Github release](https://github.com/splunk/splunk-connect-for-snmp/releases). + +## Configuration +To configure the deployment, follow the instructions in [Inventory configuration](./3-inventory-configuration.md), +[Scheduler configuration](./4-scheduler-configuration.md), [Traps configuration](./5-traps-configuration.md), +[.env file configuration](./6-env-file-configuration.md), [SNMPv3 secrets](./7-snmpv3-secrets.md) + +## Deploying the app +After configuration, application can be deployed by running the +following command inside the `docker_compose` directory: + +```shell +sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d +``` + +The same command can be run to apply any updated configuration changes. 
+ +## Uninstall the app + +To uninstall the app, run the following command inside the `docker_compose` directory: + +```shell +sudo docker compose $(find docker* | sed -e 's/^/-f /') down +``` \ No newline at end of file diff --git a/docs/dockercompose/3-inventory-configuration.md b/docs/dockercompose/3-inventory-configuration.md new file mode 100644 index 000000000..b14937fa2 --- /dev/null +++ b/docs/dockercompose/3-inventory-configuration.md @@ -0,0 +1,12 @@ +# Inventory configuration + +Inventory configuration is stored in the `inventory.csv` file. Structure of this file is the same as the one of the +`poller.inventory` section in `values.yaml` file. Documentation of this section can be found in [configure inventory](../configuration/poller-configuration.md#configure-inventory). + +## Example of the configuration + +```csv +address,port,version,community,secret,securityEngine,walk_interval,profiles,smart_profiles,delete +0.0.0.0,161,2c,public,,,1800,small_walk;test_profile,t, +my_group,161,3,,my_secret,,1800,single_metric,t, +``` \ No newline at end of file diff --git a/docs/dockercompose/4-scheduler-configuration.md b/docs/dockercompose/4-scheduler-configuration.md new file mode 100644 index 000000000..f1dc20d67 --- /dev/null +++ b/docs/dockercompose/4-scheduler-configuration.md @@ -0,0 +1,67 @@ +# Scheduler configuration + +Scheduler configuration is stored in the `scheduler-config.yaml` file. This file has the following sections: + +```yaml +communities: + 2c: + public: + communityIndex: + contextEngineId: + contextName: + tag: + securityName: +customTranslations: +profiles: +groups: +``` + +- `communities`: communities used for version `1` and `2c` of the `snmp`. The default one is `public`. +- `customTranslations`: configuration of the custom translations. 
Configuration of this section looks the same as in the `values.yaml` in `scheduler.customTranslations` section, which can be checked in the documentation of [custom translations](../configuration/configuring-profiles.md#custom-translations). +- `profiles`: configuration of the profiles. Configuration of this section looks the same as in the `values.yaml` in `scheduler.profiles` section, which can be checked in the documentation of [profiles configuration](../configuration/configuring-profiles.md). +- `groups`: configuration of the groups. Configuration of this section looks the same as in the `values.yaml` in `scheduler.groups` section, which can be checked in the documentation of [groups configuration](../configuration/configuring-groups.md). + +## Example of the configuration + +```yaml +communities: + 2c: + public: + communityIndex: + contextEngineId: + contextName: + tag: + securityName: +customTranslations: + IF-MIB: + ifInDiscards: myCustomName1 + ifOutErrors: myCustomName2 + SNMPv2-MIB: + sysDescr: myCustomName3 +profiles: + small_walk: + condition: + type: "walk" + varBinds: + - [ 'IP-MIB' ] + - [ 'IF-MIB' ] + - [ 'TCP-MIB' ] + - [ 'UDP-MIB' ] + multiple_conditions: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - [ 'IF-MIB', 'ifOutDiscards' ] +groups: + group1: + - address: 18.116.10.255 + port: 1163 +``` diff --git a/docs/dockercompose/5-traps-configuration.md b/docs/dockercompose/5-traps-configuration.md new file mode 100644 index 000000000..9ec5edf14 --- /dev/null +++ b/docs/dockercompose/5-traps-configuration.md @@ -0,0 +1,33 @@ +# Traps configuration + +Traps configuration is stored in the `traps-config.yaml` file. 
This file has the following sections: + +```yaml +communities: + 2c: + public: + communityIndex: + contextEngineId: + contextName: + tag: + securityName: +usernameSecrets: [] +``` + +- `communities`: communities used for version `1` and `2c` of the snmp. The default one is `public`. +- `usernameSecrets`: names of the secrets configured in docker used for `snmp v3` traps . + +## Example of the configuration + +```yaml +communities: + 2c: + public: + communityIndex: + contextEngineId: + contextName: + tag: + securityName: +usernameSecrets: + - my_secret +``` \ No newline at end of file diff --git a/docs/dockercompose/6-env-file-configuration.md b/docs/dockercompose/6-env-file-configuration.md new file mode 100644 index 000000000..08d1fcc44 --- /dev/null +++ b/docs/dockercompose/6-env-file-configuration.md @@ -0,0 +1,89 @@ +# .env file configuration + +Inside the directory with the docker compose files, there is a `.env`. Variables in it can be divided into few sections. + +## Deployment + +| Variable | Description | +|---------------------------------------|------------------------------------------------------------------------------------------------------| +| `SC4SNMP_IMAGE` | The registry and name of the SC4SNMP image to pull | +| `SC4SNMP_TAG` | SC4SNMP image tag to pull | +| `SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH` | Absolute path to [scheduler-config.yaml](./4-scheduler-configuration.md) file | +| `TRAPS_CONFIG_FILE_ABSOLUTE_PATH` | Absolute path to [traps-config.yaml](./5-traps-configuration.md) file | +| `INVENTORY_FILE_ABSOLUTE_PATH` | Absolute path to [inventory.csv](./3-inventory-configuration.md) file | +| `COREFILE_ABS_PATH` | Absolute path to Corefile used by coreDNS. Default Corefile can be found inside the `docker_compose` | +| `COREDNS_ADDRESS` | IP address of the coredns inside docker network. 
Shouldn’t be changed | +| `SC4SNMP_VERSION` | Version of SC4SNMP | + +## Images of dependencies + +| Variable | Description | +|-------------------|--------------------------------------| +| `COREDNS_IMAGE` | Registry and name of Coredns image | +| `COREDNS_TAG` | Coredns image tag to pull | +| `MIBSERVER_IMAGE` | Registry and name of Mibserver image | +| `MIBSERVER_TAG` | Mibserver image tag to pull | +| `REDIS_IMAGE` | Registry and name of Redis image | +| `REDIS_TAG` | Redis image tag to pull | +| `MONGO_IMAGE` | Registry and name of MongoDB image | +| `MONGO_TAG` | MongoDB image tag to pull | + +## Splunk instance + +| Variable | Description | +|-------------------------------------|----------------------------------------------------------------------------------------------------------------------| +| `SPLUNK_HEC_HOST` | IP address or a domain name of a Splunk instance to send data to | +| `SPLUNK_HEC_PROTOCOL` | The protocol of the HEC endpoint: `https` or `http` | +| `SPLUNK_HEC_PORT` | The port of the HEC endpoint | +| `SPLUNK_HEC_TOKEN` | Splunk HTTP Event Collector token | +| `SPLUNK_HEC_INSECURESSL` | Whether to skip checking the certificate of the HEC endpoint when sending data over HTTPS | +| `SPLUNK_SOURCETYPE_TRAPS` | Splunk sourcetype for trap events | +| `SPLUNK_SOURCETYPE_POLLING_EVENTS` | Splunk sourcetype for non-metric polling events | +| `SPLUNK_SOURCETYPE_POLLING_METRICS` | Splunk sourcetype for metric polling events | +| `SPLUNK_HEC_INDEX_EVENTS` | Name of the Splunk event index | +| `SPLUNK_HEC_INDEX_METRICS` | Name of the Splunk metrics index | +| `SPLUNK_HEC_PATH` | Path for the HEC endpoint | +| `SPLUNK_AGGREGATE_TRAPS_EVENTS` | When set to true makes traps events collected as one event inside splunk | +| `IGNORE_EMPTY_VARBINDS` | Details can be found in [empty snmp response message issue](../bestpractices.md#empty-snmp-response-message-problem) | + +## Workers + +| Variable | Description | 
+|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------| +| `WALK_RETRY_MAX_INTERVAL` | Maximum time interval between walk attempts | +| `WALK_MAX_RETRIES` | Maximum number of walk retries | +| `METRICS_INDEXING_ENABLED` | Details can be found in [append oid index part to the metrics](../configuration/poller-configuration.md#append-oid-index-part-to-the-metrics) | +| `POLL_BASE_PROFILES` | Enable polling base profiles (with IF-MIB and SNMPv2-MIB) | +| `IGNORE_NOT_INCREASING_OIDS` | Ignoring `occurred: OID not increasing` issues for hosts specified in the array, ex: IGNORE_NOT_INCREASING_OIDS=127.0.0.1:164,127.0.0.6 | +| `WORKER_LOG_LEVEL` | Logging level of the workers, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL | +| `UDP_CONNECTION_TIMEOUT` | Timeout in seconds for SNMP operations | +| `MAX_OID_TO_PROCESS` | Sometimes SNMP Agent cannot accept more than X OIDs per once, so if the error "TooBig" is visible in logs, decrease the number of MAX_OID_TO_PROCESS | +| `WORKER_POLLER_CONCURRENCY` | Minimum number of threads in the poller container | +| `WORKER_SENDER_CONCURRENCY` | Minimum number of threads in the sender container | +| `WORKER_TRAP_CONCURRENCY` | Minimum number of threads in the trap container | +| `PREFETCH_POLLER_COUNT` | How many tasks are consumed from the queue at once in the poller container | +| `PREFETCH_SENDER_COUNT` | How many tasks are consumed from the queue at once in the sender container | +| `PREFETCH_TRAP_COUNT` | How many tasks are consumed from the queue at once in the trap container | +| `RESOLVE_TRAP_ADDRESS` | Use reverse dns lookup for trap IP address and send the hostname to Splunk | +| `MAX_DNS_CACHE_SIZE_TRAPS` | If RESOLVE_TRAP_ADDRESS is set to true, this is the maximum number of records in cache | +| `TTL_DNS_CACHE_TRAPS` | If RESOLVE_TRAP_ADDRESS is set to true, this is the time 
to live of the cached record in seconds | + +## Inventory + +| Variable | Description | +|------------------------------|---------------------------------------------------------------------------------------------------| +| `INVENTORY_LOG_LEVEL` | Logging level of the inventory, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL | +| `CHAIN_OF_TASKS_EXPIRY_TIME` | Tasks expirations time in seconds | + +## Traps + +| Variable | Description | +|------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `SNMP_V3_SECURITY_ENGINE_ID` | SNMPv3 TRAPs require the configuration SNMP Engine ID of the TRAP sending application for the USM users table of the TRAP receiving application for each USM user, for example: SNMP_V3_SECURITY_ENGINE_ID=80003a8c04,aab123456 | +| `TRAPS_PORT` | External port exposed for traps server | + +## Scheduler + +| Variable | Description | +|-----------------------|---------------------------------------------------------------------------------------------------| +| `SCHEDULER_LOG_LEVEL` | Logging level of the scheduler, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL | \ No newline at end of file diff --git a/docs/dockercompose/7-snmpv3-secrets.md b/docs/dockercompose/7-snmpv3-secrets.md new file mode 100644 index 000000000..714f8ea39 --- /dev/null +++ b/docs/dockercompose/7-snmpv3-secrets.md @@ -0,0 +1,78 @@ +# SNMPv3 secrets + +Creating a secret requires updating configuration of several docker compose files. To simplify this process, inside the +`docker_compose` package there is a `manage_secrets.py` file which will automatically manage secrets. 
+ +## Creating a new secret + +To create a new secret, `manage_secrets.py` must be run with the following flags: + +| Flag | Description | +|---------------------|--------------------------------------------------------------------------------| +| `--secret_name` | New secret name | +| `--path_to_compose` | Absolute path to directory with docker compose files | +| `--worker_poller` | \[OPTIONAL\] Add new secrets to worker poller. Default value is set to 'true'. | +| `--traps` | \[OPTIONAL\] Add new secrets to traps server. Default value is set to 'true'. | +| `--userName` | SNMPv3 userName | +| `--privProtocol` | SNMPv3 privProtocol | +| `--privKey` | SNMPv3 privKey | +| `--authProtocol` | SNMPv3 authProtocol | +| `--authKey` | SNMPv3 authKey | +| `--contextEngineId` | \[OPTIONAL\] SNMPv3 engine id | + + +This script, apart from updating configuration files, creates environmental variables with values of the secret at the +end of the `.env` file in the `docker_compose` directory. To apply those secrets run the +`sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d` command inside the `docker_compose` directory. After execution of the command, plain text secrets +from the `.env` file can be deleted. +> **_NOTE:_** In case of any changes in `.env`, the secrets must be recreated by [deleting](#deleting-a-secret) any +> previously existing secrets and creating them once again. Changes in `.env` include creating new secrets. 
+ +### Example of creating a secret +```shell +python3 manage_secrets.py --path_to_compose <path_to_compose_files> \ +--secret_name my_secret \ +--userName r-wuser \ +--privProtocol AES \ +--privKey admin1234 \ +--authProtocol SHA \ +--authKey admin1234 \ +--contextEngineId 090807060504037 +``` + +Inside `docker_compose` directory run: + +```shell +sudo docker compose $(find docker* | sed -e 's/^/-f /') up -d +``` + +Now, the following lines from the `.env` can be deleted: + +```.env +my_secret_userName=r-wuser +my_secret_privProtocol=AES +my_secret_privKey=admin1234 +my_secret_authProtocol=SHA +my_secret_authKey=admin1234 +my_secret_contextEngineId=090807060504037 +``` + +## Deleting a secret + +To delete a secret, `manage_secrets.py` must be run with the following flags: + +| Flag | Description | +|---------------------|------------------------------------------------------| +| `--secret_name` | Secret name | +| `--path_to_compose` | Absolute path to directory with docker compose files | +| `--delete` | Set this flag to true to delete the secret | + +This will delete the secret with a given name from all docker compose files. If this secret hasn't been deleted from `.env` +file, it will be removed from there. + +### Example of deleting a secret +```shell +python3 manage_secrets.py --path_to_compose <path_to_compose_files> \ +--secret_name my_secret \ +--delete true +``` \ No newline at end of file diff --git a/docs/dockercompose/8-offline-installation.md b/docs/dockercompose/8-offline-installation.md new file mode 100644 index 000000000..8bca20edf --- /dev/null +++ b/docs/dockercompose/8-offline-installation.md @@ -0,0 +1,57 @@ +# Offline installation + +In order to install SC4SNMP using docker compose in the offline environment, several docker images must be imported. 
+These images can be found in `.env` file: + +- `SC4SNMP_IMAGE` and `SC4SNMP_TAG` in `Deployment configuration` section +- `COREDNS_IMAGE` and `COREDNS_TAG` in `Dependencies images` section +- `MIBSERVER_IMAGE` and `MIBSERVER_TAG` in `Dependencies images` section +- `REDIS_IMAGE` and `REDIS_TAG` in `Dependencies images` section +- `MONGO_IMAGE` and `MONGO_TAG` in `Dependencies images` section + +Following images must be downloaded in the online environment, saved to `.tar` archive and moved to the offline environment. + +## Steps required to install necessary images + +Suppose that `.env` contains the following images: + +```.env +SC4SNMP_IMAGE=ghcr.io/splunk/splunk-connect-for-snmp/container +SC4SNMP_TAG=latest + +COREDNS_IMAGE=coredns/coredns +COREDNS_TAG=1.11.1 + +MIBSERVER_IMAGE=ghcr.io/pysnmp/mibs/container +MIBSERVER_TAG=latest + +REDIS_IMAGE=docker.io/bitnami/redis +REDIS_TAG=7.2.1-debian-11-r0 + +MONGO_IMAGE=docker.io/bitnami/mongodb +MONGO_TAG=6.0.9-debian-11-r5 +``` + +They must be downloaded in the online environment by following commands: + +```shell +docker pull ghcr.io/splunk/splunk-connect-for-snmp/container:latest +docker pull coredns/coredns:1.11.1 +docker pull ghcr.io/pysnmp/mibs/container:latest +docker pull docker.io/bitnami/redis:7.2.1-debian-11-r0 +docker pull docker.io/bitnami/mongodb:6.0.9-debian-11-r5 +``` + +Next step is to save them to `sc4snmp_offline_images.tar` archive: +```shell +docker save ghcr.io/splunk/splunk-connect-for-snmp/container:latest \ +coredns/coredns:1.11.1 \ +ghcr.io/pysnmp/mibs/container:latest \ +docker.io/bitnami/redis:7.2.1-debian-11-r0 \ +docker.io/bitnami/mongodb:6.0.9-debian-11-r5 > sc4snmp_offline_images.tar +``` + +After moving `sc4snmp_offline_images.tar` archive to the offline environment, images can be loaded to docker: +```shell +docker load --input sc4snmp_offline_images.tar +``` \ No newline at end of file diff --git a/docs/gettingstarted/mk8s/k8s-microk8s.md b/docs/gettingstarted/mk8s/k8s-microk8s.md 
index 7d8084bf4..ba87eddd7 100644 --- a/docs/gettingstarted/mk8s/k8s-microk8s.md +++ b/docs/gettingstarted/mk8s/k8s-microk8s.md @@ -22,7 +22,7 @@ in the MicroK8s [documentation](https://microk8s.io/docs), including offline and ## Install MicroK8s using Snap ```bash -sudo snap install microk8s --classic --channel=1.25/stable +sudo snap install microk8s --classic --channel=1.30/stable ``` Add a user to the microk8s group so the `sudo` command is no longer necessary: diff --git a/docs/gettingstarted/sc4snmp-installation.md b/docs/gettingstarted/sc4snmp-installation.md index 0a8e709c8..b63195441 100644 --- a/docs/gettingstarted/sc4snmp-installation.md +++ b/docs/gettingstarted/sc4snmp-installation.md @@ -161,9 +161,14 @@ index="netops" sourcetype="sc4snmp:event" ``` bash | mpreview index="netmetrics" | search sourcetype="sc4snmp:metric" ``` - -NOTE: Before polling starts, SC4SNMP must perform the SNMP WALK process on the device. It is run the first time after configuring the new device, and then during the run time in every `walk_interval`. -Its purpose is to gather all the data and provide meaningful context for the polling records. For example, it might report that your device is so large that the walk takes too long, so the scope of walking needs to be limited. +BETA: Default walk profile is polling only `SNMPv2-MIB`. +To enable full oid tree polling see [poller configuration](../../configuration/poller-configuration/#poller-configuration-file). + +NOTE: Before polling starts, SC4SNMP must perform the SNMP WALK process on the device. +It is run the first time after configuring the new device, and then during the run time in every +`walk_interval`. Its purpose is to gather all the data and provide meaningful context for the polling records. +For example, it might report that your device is so large that the walk takes too long, +so the scope of walking needs to be limited. In such cases, enable the small walk. 
See [walk takes too much time](../../bestpractices/#walking-a-device-takes-too-much-time). When the walk finishes, events appear in Splunk. @@ -183,6 +188,27 @@ To uninstall SC4SNMP run the following commands: microk8s kubectl delete pvc --all -n sc4snmp ``` +Example of pods terminating: + +``` +NAME READY STATUS RESTARTS AGE +snmp-mibserver-bb8994c64-twk42 1/1 Terminating 2 (5h21m ago) 46h +snmp-splunk-connect-for-snmp-worker-sender-7f5557678b-psj97 1/1 Terminating 1 (5h21m ago) 22h +snmp-splunk-connect-for-snmp-worker-trap-dfcc487c-lh2dl 1/1 Terminating 1 (5h21m ago) 22h +snmp-splunk-connect-for-snmp-worker-trap-dfcc487c-5z5sq 1/1 Terminating 1 (5h21m ago) 22h +snmp-splunk-connect-for-snmp-trap-684d57dc8d-722tv 1/1 Terminating 1 (5h21m ago) 22h +snmp-splunk-connect-for-snmp-trap-684d57dc8d-z68lb 1/1 Terminating 1 (5h21m ago) 22h +``` + +## Restart Splunk Connect for SNMP +First run the command to uninstall SC4SNMP, wait until all pods are removed, then use the command to install sc4snmp again. + +``` + microk8s helm3 uninstall snmp -n sc4snmp + microk8s kubectl delete pvc --all -n sc4snmp + microk8s helm3 install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace +``` + [examples_link]: https://github.com/splunk/splunk-connect-for-snmp/tree/main/examples [basic_template_link]: https://github.com/splunk/splunk-connect-for-snmp/blob/main/examples/basic_template.md diff --git a/docs/gui/enable-gui.md b/docs/gui/enable-gui.md index bd4fe3628..497ba9806 100644 --- a/docs/gui/enable-gui.md +++ b/docs/gui/enable-gui.md @@ -19,11 +19,23 @@ UI: valuesFileName: "" keepSectionFiles: true ``` - - `NodePort`: port number on which GUI will be accessible. It has to be from a range `30000-32767`. - `pullPolicy`: [kubernetes pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) - `valuesFileDirectory`: this is an obligatory field if UI is used. 
It is an absolute directory path on the host machine where configuration files from the GUI will be generated. It is used to keep all the changes from the GUI so that users can easily switch back from using UI to the current sc4snmp version. It is advised to create new folder for those files, because this directory is mounted to the Kubernetes pod and GUI application has full write access to this directory. - `valuesFileName`: [OPTIONAL] full name of the file with configuration (e.g. `values.yaml`) that is stored inside the `valuesFileDirectory` directory. If this file name is provided, and it exists in this directory, then GUI will update appropriate sections in provided `values.yaml` file. If this file name is not provided, or provided file name can’t be found inside `valuesFileDirectory` then inside that directory there will be created three files with the latest GUI configuration of groups, profiles and inventory. Those configuration can be copied and pasted to the appropriate sections in the original `values.yaml` file. + + Template of initial `values.yaml`: + ```yaml + scheduler: + profiles: | + + groups: | + + poller: + inventory: |- + ``` + > This part of configuration can be also pasted to the `values.yaml` used for SC4SNMP installation. + - `keepSectionFiles`: if valid `valuesFileName` was provided then by setting this variable to `true` or `false` user can decide whether to keep additional files with configuration of groups, profiles and inventory. If valid `valuesFileName` was NOT provided, then those files are created regardless of this variable. 
diff --git a/integration_tests/install_microk8s.sh b/integration_tests/install_microk8s.sh index cf5b8a4f8..7724836c8 100755 --- a/integration_tests/install_microk8s.sh +++ b/integration_tests/install_microk8s.sh @@ -8,7 +8,7 @@ setup_kube_roles() { } install_dependencies_on_ubuntu() { - sudo snap install microk8s --classic --channel=1.25/edge + sudo snap install microk8s --classic --channel=1.30/stable sudo snap install docker sudo apt-get install snmp -y sudo apt-get install python3-dev -y diff --git a/integration_tests/splunk_test_utils.py b/integration_tests/splunk_test_utils.py index 000096f00..2071cd521 100644 --- a/integration_tests/splunk_test_utils.py +++ b/integration_tests/splunk_test_utils.py @@ -51,6 +51,13 @@ def splunk_single_search(service, search): inventory_template = """poller: + enableFullWalk: true + inventory: | + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete +""" + +inventory_template_no_walk = """poller: + enableFullWalk: false inventory: | address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete """ @@ -77,6 +84,7 @@ def splunk_single_search(service, search): TEMPLATE_MAPPING = { "inventory.yaml": inventory_template, + "inventory2.yaml": inventory_template_no_walk, "profiles.yaml": profiles_template, "scheduler_secrets.yaml": poller_secrets_template, "traps_secrets.yaml": traps_secrets_template, diff --git a/integration_tests/test_poller_integration.py b/integration_tests/test_poller_integration.py index f74492e1b..821b6004e 100644 --- a/integration_tests/test_poller_integration.py +++ b/integration_tests/test_poller_integration.py @@ -379,12 +379,12 @@ def setup_small_walk(request): }, } update_profiles(profile) - update_file([f"{trap_external_ip},,2c,public,,,20,walk1,f,"], "inventory.yaml") - upgrade_helm(["inventory.yaml", "profiles.yaml"]) + update_file([f"{trap_external_ip},,2c,public,,,20,walk1,f,"], "inventory2.yaml") + 
upgrade_helm(["inventory2.yaml", "profiles.yaml"]) time.sleep(30) yield - update_file([f"{trap_external_ip},,2c,public,,,20,walk1,f,t"], "inventory.yaml") - upgrade_helm(["inventory.yaml"]) + update_file([f"{trap_external_ip},,2c,public,,,20,walk1,f,t"], "inventory2.yaml") + upgrade_helm(["inventory2.yaml"]) time.sleep(20) @@ -410,6 +410,89 @@ def test_check_if_walk_scope_was_smaller(self, setup_splunk): assert metric_count > 0 +@pytest.fixture +def setup_small_walk_with_full_walk_enabled(request): + trap_external_ip = request.config.getoption("trap_external_ip") + profile = { + "walk1": { + "condition": {"type": "walk"}, + "varBinds": [yaml_escape_list(sq("IP-MIB"))], + }, + } + update_profiles(profile) + update_file([f"{trap_external_ip},,2c,public,,,20,walk1,f,"], "inventory.yaml") + upgrade_helm(["inventory.yaml", "profiles.yaml"]) + time.sleep(20) + yield + update_file([f"{trap_external_ip},,2c,public,,,20,walk1,f,t"], "inventory.yaml") + upgrade_helm(["inventory.yaml"]) + time.sleep(20) + + +@pytest.mark.usefixtures("setup_small_walk_with_full_walk_enabled") +class TestSmallWalkWithFullWalkEnabled: + def test_check_if_full_walk_is_done_with_profile_set(self, setup_splunk): + time.sleep(20) + search_string = ( + """| mpreview index=netmetrics earliest=-40s | search "TCP-MIB" """ + ) + result_count, metric_count = run_retried_single_search( + setup_splunk, search_string, 1 + ) + assert result_count > 0 + assert metric_count > 0 + search_string = ( + """| mpreview index=netmetrics earliest=-40s | search "IP-MIB" """ + ) + result_count, metric_count = run_retried_single_search( + setup_splunk, search_string, 2 + ) + assert result_count > 0 + assert metric_count > 0 + + +@pytest.fixture +def setup_walk(request): + trap_external_ip = request.config.getoption("trap_external_ip") + update_file([f"{trap_external_ip},,2c,public,,,20,,f,"], "inventory2.yaml") + upgrade_helm(["inventory2.yaml", "profiles.yaml"]) + time.sleep(30) + yield + 
update_file([f"{trap_external_ip},,2c,public,,,20,,f,t"], "inventory2.yaml") + upgrade_helm(["inventory2.yaml"]) + time.sleep(20) + + +@pytest.mark.usefixtures("setup_walk") +class TestPartialWalk: + def test_check_if_partial_walk_is_done(self, setup_splunk): + time.sleep(20) + search_string = ( + """| mpreview index=netmetrics earliest=-20s | search "TCP-MIB" """ + ) + result_count, metric_count = run_retried_single_search( + setup_splunk, search_string, 1 + ) + assert result_count == 0 + assert metric_count == 0 + search_string = ( + """| mpreview index=netmetrics earliest=-20s | search "IP-MIB" """ + ) + result_count, metric_count = run_retried_single_search( + setup_splunk, search_string, 2 + ) + assert result_count == 0 + assert metric_count == 0 + search_string = ( + """| mpreview index=netmetrics earliest=-20s | search "SNMPv2-MIB" """ + ) + result_count, metric_count = run_retried_single_search( + setup_splunk, search_string, 2 + ) + assert result_count > 0 + assert metric_count > 0 + + @pytest.fixture() def setup_v3_connection(request): trap_external_ip = request.config.getoption("trap_external_ip") diff --git a/integration_tests/values.yaml b/integration_tests/values.yaml index b92b71770..663f02a91 100644 --- a/integration_tests/values.yaml +++ b/integration_tests/values.yaml @@ -65,6 +65,7 @@ scheduler: groups: | {} poller: + enableFullWalk: true usernameSecrets: - sv3poller # - sc4snmp-hlab-sha-aes diff --git a/mkdocs.yml b/mkdocs.yml index acff02b53..eebb44a8d 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,3 +1,4 @@ +--- site_name: Splunk Connect for SNMP (SC4SNMP) extra: @@ -6,7 +7,7 @@ extra: markdown_extensions: - toc: - permalink: True + permalink: true - smarty - fenced_code - sane_lists @@ -15,13 +16,13 @@ markdown_extensions: - md_in_html plugins: - - search: - lang: en - - mkdocs-video: - is_video: True + - search: + lang: en + - mkdocs-video: + is_video: true extra_javascript: - - javascripts/footer.js + - javascripts/footer.js copyright:

Webpages built on GitHub Pages | Github Terms | GitHub Privacy

@@ -30,7 +31,7 @@ theme: palette: primary: "black" accent: "orange" - #favicon: "logo.png" +# favicon: "logo.png" # logo: "logo.png" nav: @@ -43,12 +44,12 @@ nav: - Configuration: - Deployment: "configuration/deployment-configuration.md" - Polling: - - Poller: "configuration/poller-configuration.md" - - Scheduler: "configuration/scheduler-configuration.md" - - Configuring Profiles: "configuration/configuring-profiles.md" - - Configuring Groups: "configuration/configuring-groups.md" - - Step by Step polling example: "configuration/step-by-step-poll.md" - - SNMP data format: "configuration/snmp-data-format.md" + - Poller: "configuration/poller-configuration.md" + - Scheduler: "configuration/scheduler-configuration.md" + - Configuring Profiles: "configuration/configuring-profiles.md" + - Configuring Groups: "configuration/configuring-groups.md" + - Step by Step polling example: "configuration/step-by-step-poll.md" + - SNMP data format: "configuration/snmp-data-format.md" - Traps: "configuration/trap-configuration.md" - Worker: "configuration/worker-configuration.md" - Mongo DB: "configuration/mongo-configuration.md" @@ -66,13 +67,21 @@ nav: - Configuring Groups: "gui/groups-gui.md" - Configuring Inventory: "gui/inventory-gui.md" - Apply changes: "gui/apply-changes.md" + - Docker compose: + - Install Docker: "dockercompose/1-install-docker.md" + - Download package with docker compose files: "dockercompose/2-download-package.md" + - Inventory configuration: "dockercompose/3-inventory-configuration.md" + - Scheduler configuration: "dockercompose/4-scheduler-configuration.md" + - Traps configuration: "dockercompose/5-traps-configuration.md" + - .env file configuration: "dockercompose/6-env-file-configuration.md" + - SNMPv3 secrets configuration: "dockercompose/7-snmpv3-secrets.md" + - Offline installation: "dockercompose/8-offline-installation.md" - Lightweight installation: "small-environment.md" - Planning: "planning.md" - Security: "security.md" - Request MIB: 
"mib-request.md" - Upgrade SC4SNMP: "upgrade.md" - - Troubleshooting : "bestpractices.md" + - Troubleshooting: "bestpractices.md" - Releases: "releases.md" - High Availability: ha.md - Docker compose deployment: "docker-compose.md" - diff --git a/poetry.lock b/poetry.lock index 345e3f81a..9f8219199 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "amqp" @@ -382,18 +382,23 @@ dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version [[package]] name = "dnspython" -version = "1.16.0" +version = "2.6.1" description = "DNS toolkit" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.8" files = [ - {file = "dnspython-1.16.0-py2.py3-none-any.whl", hash = "sha256:f69c21288a962f4da86e56c4905b49d11aba7938d3d740e80d9e366ee4f1632d"}, - {file = "dnspython-1.16.0.zip", hash = "sha256:36c5e8e38d4369a08b6780b7f27d790a292b2b08eea01607865bf0936c558e01"}, + {file = "dnspython-2.6.1-py3-none-any.whl", hash = "sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50"}, + {file = "dnspython-2.6.1.tar.gz", hash = "sha256:e8f0f9c23a7b7cb99ded64e6c3a6f3e701d78f50c55e002b839dea7225cff7cc"}, ] [package.extras] -dnssec = ["ecdsa (>=0.13)", "pycryptodome"] -idna = ["idna (>=2.1)"] +dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "sphinx (>=7.2.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] +dnssec = ["cryptography (>=41)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"] +doq = ["aioquic (>=0.9.25)"] +idna = ["idna (>=3.6)"] +trio = ["trio (>=0.23)"] +wmi = ["wmi (>=1.5.1)"] [[package]] name = "exceptiongroup" @@ -428,13 +433,13 @@ dev = ["flake8", "markdown", "twine", "wheel"] [[package]] name 
= "idna" -version = "3.3" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, - {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -487,13 +492,13 @@ files = [ [[package]] name = "jinja2" -version = "3.1.2" +version = "3.1.3" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, + {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, + {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, ] [package.dependencies] @@ -788,13 +793,13 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp [[package]] name = "mkdocs-material" -version = "9.5.2" +version = "9.5.18" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.2-py3-none-any.whl", hash = "sha256:6ed0fbf4682491766f0ec1acc955db6901c2fd424c7ab343964ef51b819741f5"}, - {file = "mkdocs_material-9.5.2.tar.gz", hash = "sha256:ca8b9cd2b3be53e858e5a1a45ac9668bd78d95d77a30288bb5ebc1a31db6184c"}, + {file = "mkdocs_material-9.5.18-py3-none-any.whl", hash = 
"sha256:1e0e27fc9fe239f9064318acf548771a4629d5fd5dfd45444fd80a953fe21eb4"}, + {file = "mkdocs_material-9.5.18.tar.gz", hash = "sha256:a43f470947053fa2405c33995f282d24992c752a50114f23f30da9d8d0c57e62"}, ] [package.dependencies] @@ -802,7 +807,7 @@ babel = ">=2.10,<3.0" colorama = ">=0.4,<1.0" jinja2 = ">=3.0,<4.0" markdown = ">=3.2,<4.0" -mkdocs = ">=1.5.3,<2.0" +mkdocs = ">=1.5.3,<1.6.0" mkdocs-material-extensions = ">=1.3,<2.0" paginate = ">=0.5,<1.0" pygments = ">=2.16,<3.0" @@ -811,8 +816,8 @@ regex = ">=2022.4" requests = ">=2.26,<3.0" [package.extras] -git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2,<2.0)"] -imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=9.4,<10.0)"] +git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] +imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] [[package]] @@ -843,13 +848,13 @@ mkdocs = ">=1.1.0,<2" [[package]] name = "mongoengine" -version = "0.27.0" +version = "0.28.2" description = "MongoEngine is a Python Object-Document Mapper for working with MongoDB." 
optional = false python-versions = ">=3.7" files = [ - {file = "mongoengine-0.27.0-py3-none-any.whl", hash = "sha256:c3523b8f886052f3deb200b3218bcc13e4b781661e3bea38587cc936c80ea358"}, - {file = "mongoengine-0.27.0.tar.gz", hash = "sha256:8f38df7834dc4b192d89f2668dcf3091748d12f74d55648ce77b919167a4a49b"}, + {file = "mongoengine-0.28.2-py3-none-any.whl", hash = "sha256:8e0f84a5ad3d335e5da98261454d4ab546c866241ed064adc6433fe2077d43c9"}, + {file = "mongoengine-0.28.2.tar.gz", hash = "sha256:67c35a2ebe0ee7fd8eda3766dc251b9e0aada4489bb935f7a55b4c570d148ca7"}, ] [package.dependencies] @@ -870,18 +875,18 @@ pymongo = ">=2.6.0" [[package]] name = "opentelemetry-api" -version = "1.21.0" +version = "1.24.0" description = "OpenTelemetry Python API" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_api-1.21.0-py3-none-any.whl", hash = "sha256:4bb86b28627b7e41098f0e93280fe4892a1abed1b79a19aec6f928f39b17dffb"}, - {file = "opentelemetry_api-1.21.0.tar.gz", hash = "sha256:d6185fd5043e000075d921822fd2d26b953eba8ca21b1e2fa360dd46a7686316"}, + {file = "opentelemetry_api-1.24.0-py3-none-any.whl", hash = "sha256:0f2c363d98d10d1ce93330015ca7fd3a65f60be64e05e30f557c61de52c80ca2"}, + {file = "opentelemetry_api-1.24.0.tar.gz", hash = "sha256:42719f10ce7b5a9a73b10a4baf620574fb8ad495a9cbe5c18d76b75d8689c67e"}, ] [package.dependencies] deprecated = ">=1.2.6" -importlib-metadata = ">=6.0,<7.0" +importlib-metadata = ">=6.0,<=7.0" [[package]] name = "opentelemetry-exporter-jaeger-thrift" @@ -901,13 +906,13 @@ thrift = ">=0.10.0" [[package]] name = "opentelemetry-instrumentation" -version = "0.42b0" +version = "0.45b0" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_instrumentation-0.42b0-py3-none-any.whl", hash = "sha256:65ae54ddb90ca2d05d2d16bf6863173e7141eba1bbbf41fc9bbb02446adbe369"}, - {file = 
"opentelemetry_instrumentation-0.42b0.tar.gz", hash = "sha256:6a653a1fed0f76eea32885321d77c750483e987eeefa4cbf219fc83559543198"}, + {file = "opentelemetry_instrumentation-0.45b0-py3-none-any.whl", hash = "sha256:06c02e2c952c1b076e8eaedf1b82f715e2937ba7eeacab55913dd434fbcec258"}, + {file = "opentelemetry_instrumentation-0.45b0.tar.gz", hash = "sha256:6c47120a7970bbeb458e6a73686ee9ba84b106329a79e4a4a66761f933709c7e"}, ] [package.dependencies] @@ -917,67 +922,63 @@ wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-instrumentation-celery" -version = "0.42b0" +version = "0.45b0" description = "OpenTelemetry Celery Instrumentation" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_instrumentation_celery-0.42b0-py3-none-any.whl", hash = "sha256:25a1c2fc35ee4f4c87c855c9b09af09b0084cba796d3b2972586bb64ef23b6dc"}, - {file = "opentelemetry_instrumentation_celery-0.42b0.tar.gz", hash = "sha256:1b6a55c1f2bd193737643e736aa85988b8522fdbff7ec934edc34ee59257fa5d"}, + {file = "opentelemetry_instrumentation_celery-0.45b0-py3-none-any.whl", hash = "sha256:0c769357dc95b44020897b7c5d79a46146956bf4914c5bce65ad9af7eebe0c48"}, + {file = "opentelemetry_instrumentation_celery-0.45b0.tar.gz", hash = "sha256:0d0ea225a2cdaf6ae6ea0856bed75a232d65cc552ee57c79afe1d067f2a9ccdd"}, ] [package.dependencies] opentelemetry-api = ">=1.12,<2.0" -opentelemetry-instrumentation = "0.42b0" -opentelemetry-semantic-conventions = "0.42b0" +opentelemetry-instrumentation = "0.45b0" +opentelemetry-semantic-conventions = "0.45b0" [package.extras] instruments = ["celery (>=4.0,<6.0)"] -test = ["opentelemetry-instrumentation-celery[instruments]", "opentelemetry-test-utils (==0.42b0)", "pytest"] [[package]] name = "opentelemetry-instrumentation-logging" -version = "0.42b0" +version = "0.45b0" description = "OpenTelemetry Logging instrumentation" optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = 
"opentelemetry_instrumentation_logging-0.42b0-py2.py3-none-any.whl", hash = "sha256:d504103ddfd260e11f0c07d8c0f0ca25694c3c0a96dd0902239baa9790b12c29"}, - {file = "opentelemetry_instrumentation_logging-0.42b0.tar.gz", hash = "sha256:222922cb666bcada986db5b3574656589dbd118ddf976a0f1c75098f2d8fb40f"}, + {file = "opentelemetry_instrumentation_logging-0.45b0-py3-none-any.whl", hash = "sha256:bfaaca6862e84bb41b434178fba69afdb622f226cfdee243acb3959b65c97b48"}, + {file = "opentelemetry_instrumentation_logging-0.45b0.tar.gz", hash = "sha256:48bfb6161a09f210c28a30295c4d217c4703e2d05d1df04fd3ab19ea30837978"}, ] [package.dependencies] opentelemetry-api = ">=1.12,<2.0" -opentelemetry-instrumentation = "0.42b0" - -[package.extras] -test = ["opentelemetry-test-utils (==0.42b0)"] +opentelemetry-instrumentation = "0.45b0" [[package]] name = "opentelemetry-sdk" -version = "1.21.0" +version = "1.24.0" description = "OpenTelemetry Python SDK" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_sdk-1.21.0-py3-none-any.whl", hash = "sha256:9fe633243a8c655fedace3a0b89ccdfc654c0290ea2d8e839bd5db3131186f73"}, - {file = "opentelemetry_sdk-1.21.0.tar.gz", hash = "sha256:3ec8cd3020328d6bc5c9991ccaf9ae820ccb6395a5648d9a95d3ec88275b8879"}, + {file = "opentelemetry_sdk-1.24.0-py3-none-any.whl", hash = "sha256:fa731e24efe832e98bcd90902085b359dcfef7d9c9c00eb5b9a18587dae3eb59"}, + {file = "opentelemetry_sdk-1.24.0.tar.gz", hash = "sha256:75bc0563affffa827700e0f4f4a68e1e257db0df13372344aebc6f8a64cde2e5"}, ] [package.dependencies] -opentelemetry-api = "1.21.0" -opentelemetry-semantic-conventions = "0.42b0" +opentelemetry-api = "1.24.0" +opentelemetry-semantic-conventions = "0.45b0" typing-extensions = ">=3.7.4" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.42b0" +version = "0.45b0" description = "OpenTelemetry Semantic Conventions" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = 
"opentelemetry_semantic_conventions-0.42b0-py3-none-any.whl", hash = "sha256:5cd719cbfec448af658860796c5d0fcea2fdf0945a2bed2363f42cb1ee39f526"}, - {file = "opentelemetry_semantic_conventions-0.42b0.tar.gz", hash = "sha256:44ae67a0a3252a05072877857e5cc1242c98d4cf12870159f1a94bec800d38ec"}, + {file = "opentelemetry_semantic_conventions-0.45b0-py3-none-any.whl", hash = "sha256:a4a6fb9a7bacd9167c082aa4681009e9acdbfa28ffb2387af50c2fef3d30c864"}, + {file = "opentelemetry_semantic_conventions-0.45b0.tar.gz", hash = "sha256:7c84215a44ac846bc4b8e32d5e78935c5c43482e491812a0bb8aaf87e4d92118"}, ] [[package]] @@ -1088,89 +1089,88 @@ wcwidth = "*" [[package]] name = "pycryptodomex" -version = "3.17" +version = "3.20.0" description = "Cryptographic library for Python" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ - {file = "pycryptodomex-3.17-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:12056c38e49d972f9c553a3d598425f8a1c1d35b2e4330f89d5ff1ffb70de041"}, - {file = "pycryptodomex-3.17-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab33c2d9f275e05e235dbca1063753b5346af4a5cac34a51fa0da0d4edfb21d7"}, - {file = "pycryptodomex-3.17-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:caa937ff29d07a665dfcfd7a84f0d4207b2ebf483362fa9054041d67fdfacc20"}, - {file = "pycryptodomex-3.17-cp27-cp27m-manylinux2014_aarch64.whl", hash = "sha256:db23d7341e21b273d2440ec6faf6c8b1ca95c8894da612e165be0b89a8688340"}, - {file = "pycryptodomex-3.17-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:f854c8476512cebe6a8681cc4789e4fcff6019c17baa0fd72b459155dc605ab4"}, - {file = "pycryptodomex-3.17-cp27-cp27m-win32.whl", hash = "sha256:a57e3257bacd719769110f1f70dd901c5b6955e9596ad403af11a3e6e7e3311c"}, - {file = "pycryptodomex-3.17-cp27-cp27m-win_amd64.whl", hash = "sha256:d38ab9e53b1c09608ba2d9b8b888f1e75d6f66e2787e437adb1fecbffec6b112"}, - {file = "pycryptodomex-3.17-cp27-cp27mu-manylinux2010_i686.whl", hash = 
"sha256:3c2516b42437ae6c7a29ef3ddc73c8d4714e7b6df995b76be4695bbe4b3b5cd2"}, - {file = "pycryptodomex-3.17-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:5c23482860302d0d9883404eaaa54b0615eefa5274f70529703e2c43cc571827"}, - {file = "pycryptodomex-3.17-cp27-cp27mu-manylinux2014_aarch64.whl", hash = "sha256:7a8dc3ee7a99aae202a4db52de5a08aa4d01831eb403c4d21da04ec2f79810db"}, - {file = "pycryptodomex-3.17-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:7cc28dd33f1f3662d6da28ead4f9891035f63f49d30267d3b41194c8778997c8"}, - {file = "pycryptodomex-3.17-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:2d4d395f109faba34067a08de36304e846c791808524614c731431ee048fe70a"}, - {file = "pycryptodomex-3.17-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:55eed98b4150a744920597c81b3965b632038781bab8a08a12ea1d004213c600"}, - {file = "pycryptodomex-3.17-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:7fa0b52df90343fafe319257b31d909be1d2e8852277fb0376ba89d26d2921db"}, - {file = "pycryptodomex-3.17-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78f0ddd4adc64baa39b416f3637aaf99f45acb0bcdc16706f0cc7ebfc6f10109"}, - {file = "pycryptodomex-3.17-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4fa037078e92c7cc49f6789a8bac3de06856740bb2038d05f2d9a2e4b165d59"}, - {file = "pycryptodomex-3.17-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:88b0d5bb87eaf2a31e8a759302b89cf30c97f2f8ca7d83b8c9208abe8acb447a"}, - {file = "pycryptodomex-3.17-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:6feedf4b0e36b395329b4186a805f60f900129cdf0170e120ecabbfcb763995d"}, - {file = "pycryptodomex-3.17-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:7a6651a07f67c28b6e978d63aa3a3fccea0feefed9a8453af3f7421a758461b7"}, - {file = "pycryptodomex-3.17-cp35-abi3-win32.whl", hash = "sha256:32e764322e902bbfac49ca1446604d2839381bbbdd5a57920c9daaf2e0b778df"}, - {file = "pycryptodomex-3.17-cp35-abi3-win_amd64.whl", 
hash = "sha256:4b51e826f0a04d832eda0790bbd0665d9bfe73e5a4d8ea93b6a9b38beeebe935"}, - {file = "pycryptodomex-3.17-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:d4cf0128da167562c49b0e034f09e9cedd733997354f2314837c2fa461c87bb1"}, - {file = "pycryptodomex-3.17-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:c92537b596bd5bffb82f8964cabb9fef1bca8a28a9e0a69ffd3ec92a4a7ad41b"}, - {file = "pycryptodomex-3.17-pp27-pypy_73-win32.whl", hash = "sha256:599bb4ae4bbd614ca05f49bd4e672b7a250b80b13ae1238f05fd0f09d87ed80a"}, - {file = "pycryptodomex-3.17-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4c4674f4b040321055c596aac926d12f7f6859dfe98cd12f4d9453b43ab6adc8"}, - {file = "pycryptodomex-3.17-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a3648025e4ddb72d43addab764336ba2e670c8377dba5dd752e42285440d31"}, - {file = "pycryptodomex-3.17-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40e8a11f578bd0851b02719c862d55d3ee18d906c8b68a9c09f8c564d6bb5b92"}, - {file = "pycryptodomex-3.17-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:23d83b610bd97704f0cd3acc48d99b76a15c8c1540d8665c94d514a49905bad7"}, - {file = "pycryptodomex-3.17-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd29d35ac80755e5c0a99d96b44fb9abbd7e871849581ea6a4cb826d24267537"}, - {file = "pycryptodomex-3.17-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64b876d57cb894b31056ad8dd6a6ae1099b117ae07a3d39707221133490e5715"}, - {file = "pycryptodomex-3.17-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee8bf4fdcad7d66beb744957db8717afc12d176e3fd9c5d106835133881a049b"}, - {file = "pycryptodomex-3.17-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c84689c73358dfc23f9fdcff2cb9e7856e65e2ce3b5ed8ff630d4c9bdeb1867b"}, - {file = "pycryptodomex-3.17.tar.gz", hash = 
"sha256:0af93aad8d62e810247beedef0261c148790c52f3cd33643791cc6396dd217c1"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:645bd4ca6f543685d643dadf6a856cc382b654cc923460e3a10a49c1b3832aeb"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ff5c9a67f8a4fba4aed887216e32cbc48f2a6fb2673bb10a99e43be463e15913"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:8ee606964553c1a0bc74057dd8782a37d1c2bc0f01b83193b6f8bb14523b877b"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7805830e0c56d88f4d491fa5ac640dfc894c5ec570d1ece6ed1546e9df2e98d6"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:bc3ee1b4d97081260d92ae813a83de4d2653206967c4a0a017580f8b9548ddbc"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-win32.whl", hash = "sha256:8af1a451ff9e123d0d8bd5d5e60f8e3315c3a64f3cdd6bc853e26090e195cdc8"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-win_amd64.whl", hash = "sha256:cbe71b6712429650e3883dc81286edb94c328ffcd24849accac0a4dbcc76958a"}, + {file = "pycryptodomex-3.20.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:76bd15bb65c14900d98835fcd10f59e5e0435077431d3a394b60b15864fddd64"}, + {file = "pycryptodomex-3.20.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:653b29b0819605fe0898829c8ad6400a6ccde096146730c2da54eede9b7b8baa"}, + {file = "pycryptodomex-3.20.0-cp27-cp27mu-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62a5ec91388984909bb5398ea49ee61b68ecb579123694bffa172c3b0a107079"}, + {file = "pycryptodomex-3.20.0-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:108e5f1c1cd70ffce0b68739c75734437c919d2eaec8e85bffc2c8b4d2794305"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:59af01efb011b0e8b686ba7758d59cf4a8263f9ad35911bfe3f416cee4f5c08c"}, + {file = 
"pycryptodomex-3.20.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:82ee7696ed8eb9a82c7037f32ba9b7c59e51dda6f105b39f043b6ef293989cb3"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91852d4480a4537d169c29a9d104dda44094c78f1f5b67bca76c29a91042b623"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca649483d5ed251d06daf25957f802e44e6bb6df2e8f218ae71968ff8f8edc4"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e186342cfcc3aafaad565cbd496060e5a614b441cacc3995ef0091115c1f6c5"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:25cd61e846aaab76d5791d006497134602a9e451e954833018161befc3b5b9ed"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:9c682436c359b5ada67e882fec34689726a09c461efd75b6ea77b2403d5665b7"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:7a7a8f33a1f1fb762ede6cc9cbab8f2a9ba13b196bfaf7bc6f0b39d2ba315a43"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-win32.whl", hash = "sha256:c39778fd0548d78917b61f03c1fa8bfda6cfcf98c767decf360945fe6f97461e"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-win_amd64.whl", hash = "sha256:2a47bcc478741b71273b917232f521fd5704ab4b25d301669879e7273d3586cc"}, + {file = "pycryptodomex-3.20.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:1be97461c439a6af4fe1cf8bf6ca5936d3db252737d2f379cc6b2e394e12a458"}, + {file = "pycryptodomex-3.20.0-pp27-pypy_73-win32.whl", hash = "sha256:19764605feea0df966445d46533729b645033f134baeb3ea26ad518c9fdf212c"}, + {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f2e497413560e03421484189a6b65e33fe800d3bd75590e6d78d4dfdb7accf3b"}, + {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e48217c7901edd95f9f097feaa0388da215ed14ce2ece803d3f300b4e694abea"}, + {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d00fe8596e1cc46b44bf3907354e9377aa030ec4cd04afbbf6e899fc1e2a7781"}, + {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88afd7a3af7ddddd42c2deda43d53d3dfc016c11327d0915f90ca34ebda91499"}, + {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d3584623e68a5064a04748fb6d76117a21a7cb5eaba20608a41c7d0c61721794"}, + {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0daad007b685db36d977f9de73f61f8da2a7104e20aca3effd30752fd56f73e1"}, + {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dcac11031a71348faaed1f403a0debd56bf5404232284cf8c761ff918886ebc"}, + {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:69138068268127cd605e03438312d8f271135a33140e2742b417d027a0539427"}, + {file = "pycryptodomex-3.20.0.tar.gz", hash = "sha256:7a710b79baddd65b806402e14766c721aee8fb83381769c27920f26476276c1e"}, ] [[package]] name = "pydantic" -version = "1.10.13" +version = "1.10.15" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:efff03cc7a4f29d9009d1c96ceb1e7a70a65cfe86e89d34e4a5f2ab1e5693737"}, - {file = "pydantic-1.10.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ecea2b9d80e5333303eeb77e180b90e95eea8f765d08c3d278cd56b00345d01"}, - {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1740068fd8e2ef6eb27a20e5651df000978edce6da6803c2bef0bc74540f9548"}, - {file = 
"pydantic-1.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84bafe2e60b5e78bc64a2941b4c071a4b7404c5c907f5f5a99b0139781e69ed8"}, - {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bc0898c12f8e9c97f6cd44c0ed70d55749eaf783716896960b4ecce2edfd2d69"}, - {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:654db58ae399fe6434e55325a2c3e959836bd17a6f6a0b6ca8107ea0571d2e17"}, - {file = "pydantic-1.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:75ac15385a3534d887a99c713aa3da88a30fbd6204a5cd0dc4dab3d770b9bd2f"}, - {file = "pydantic-1.10.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c553f6a156deb868ba38a23cf0df886c63492e9257f60a79c0fd8e7173537653"}, - {file = "pydantic-1.10.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5e08865bc6464df8c7d61439ef4439829e3ab62ab1669cddea8dd00cd74b9ffe"}, - {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e31647d85a2013d926ce60b84f9dd5300d44535a9941fe825dc349ae1f760df9"}, - {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:210ce042e8f6f7c01168b2d84d4c9eb2b009fe7bf572c2266e235edf14bacd80"}, - {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8ae5dd6b721459bfa30805f4c25880e0dd78fc5b5879f9f7a692196ddcb5a580"}, - {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f8e81fc5fb17dae698f52bdd1c4f18b6ca674d7068242b2aff075f588301bbb0"}, - {file = "pydantic-1.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:61d9dce220447fb74f45e73d7ff3b530e25db30192ad8d425166d43c5deb6df0"}, - {file = "pydantic-1.10.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b03e42ec20286f052490423682016fd80fda830d8e4119f8ab13ec7464c0132"}, - {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f59ef915cac80275245824e9d771ee939133be38215555e9dc90c6cb148aaeb5"}, - {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a1f9f747851338933942db7af7b6ee8268568ef2ed86c4185c6ef4402e80ba8"}, - {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:97cce3ae7341f7620a0ba5ef6cf043975cd9d2b81f3aa5f4ea37928269bc1b87"}, - {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854223752ba81e3abf663d685f105c64150873cc6f5d0c01d3e3220bcff7d36f"}, - {file = "pydantic-1.10.13-cp37-cp37m-win_amd64.whl", hash = "sha256:b97c1fac8c49be29486df85968682b0afa77e1b809aff74b83081cc115e52f33"}, - {file = "pydantic-1.10.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c958d053453a1c4b1c2062b05cd42d9d5c8eb67537b8d5a7e3c3032943ecd261"}, - {file = "pydantic-1.10.13-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c5370a7edaac06daee3af1c8b1192e305bc102abcbf2a92374b5bc793818599"}, - {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6f6e7305244bddb4414ba7094ce910560c907bdfa3501e9db1a7fd7eaea127"}, - {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3a3c792a58e1622667a2837512099eac62490cdfd63bd407993aaf200a4cf1f"}, - {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c636925f38b8db208e09d344c7aa4f29a86bb9947495dd6b6d376ad10334fb78"}, - {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:678bcf5591b63cc917100dc50ab6caebe597ac67e8c9ccb75e698f66038ea953"}, - {file = "pydantic-1.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:6cf25c1a65c27923a17b3da28a0bdb99f62ee04230c931d83e888012851f4e7f"}, - {file = "pydantic-1.10.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8ef467901d7a41fa0ca6db9ae3ec0021e3f657ce2c208e98cd511f3161c762c6"}, - {file = 
"pydantic-1.10.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968ac42970f57b8344ee08837b62f6ee6f53c33f603547a55571c954a4225691"}, - {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9849f031cf8a2f0a928fe885e5a04b08006d6d41876b8bbd2fc68a18f9f2e3fd"}, - {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56e3ff861c3b9c6857579de282ce8baabf443f42ffba355bf070770ed63e11e1"}, - {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f00790179497767aae6bcdc36355792c79e7bbb20b145ff449700eb076c5f96"}, - {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:75b297827b59bc229cac1a23a2f7a4ac0031068e5be0ce385be1462e7e17a35d"}, - {file = "pydantic-1.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:e70ca129d2053fb8b728ee7d1af8e553a928d7e301a311094b8a0501adc8763d"}, - {file = "pydantic-1.10.13-py3-none-any.whl", hash = "sha256:b87326822e71bd5f313e7d3bfdc77ac3247035ac10b0c0618bd99dcf95b1e687"}, - {file = "pydantic-1.10.13.tar.gz", hash = "sha256:32c8b48dcd3b2ac4e78b0ba4af3a2c2eb6048cb75202f0ea7b34feb740efc340"}, + {file = "pydantic-1.10.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22ed12ee588b1df028a2aa5d66f07bf8f8b4c8579c2e96d5a9c1f96b77f3bb55"}, + {file = "pydantic-1.10.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:75279d3cac98186b6ebc2597b06bcbc7244744f6b0b44a23e4ef01e5683cc0d2"}, + {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50f1666a9940d3d68683c9d96e39640f709d7a72ff8702987dab1761036206bb"}, + {file = "pydantic-1.10.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82790d4753ee5d00739d6cb5cf56bceb186d9d6ce134aca3ba7befb1eedbc2c8"}, + {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:d207d5b87f6cbefbdb1198154292faee8017d7495a54ae58db06762004500d00"}, + {file = "pydantic-1.10.15-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e49db944fad339b2ccb80128ffd3f8af076f9f287197a480bf1e4ca053a866f0"}, + {file = "pydantic-1.10.15-cp310-cp310-win_amd64.whl", hash = "sha256:d3b5c4cbd0c9cb61bbbb19ce335e1f8ab87a811f6d589ed52b0254cf585d709c"}, + {file = "pydantic-1.10.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c3d5731a120752248844676bf92f25a12f6e45425e63ce22e0849297a093b5b0"}, + {file = "pydantic-1.10.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c365ad9c394f9eeffcb30a82f4246c0006417f03a7c0f8315d6211f25f7cb654"}, + {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3287e1614393119c67bd4404f46e33ae3be3ed4cd10360b48d0a4459f420c6a3"}, + {file = "pydantic-1.10.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be51dd2c8596b25fe43c0a4a59c2bee4f18d88efb8031188f9e7ddc6b469cf44"}, + {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6a51a1dd4aa7b3f1317f65493a182d3cff708385327c1c82c81e4a9d6d65b2e4"}, + {file = "pydantic-1.10.15-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4e316e54b5775d1eb59187f9290aeb38acf620e10f7fd2f776d97bb788199e53"}, + {file = "pydantic-1.10.15-cp311-cp311-win_amd64.whl", hash = "sha256:0d142fa1b8f2f0ae11ddd5e3e317dcac060b951d605fda26ca9b234b92214986"}, + {file = "pydantic-1.10.15-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7ea210336b891f5ea334f8fc9f8f862b87acd5d4a0cbc9e3e208e7aa1775dabf"}, + {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3453685ccd7140715e05f2193d64030101eaad26076fad4e246c1cc97e1bb30d"}, + {file = "pydantic-1.10.15-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bea1f03b8d4e8e86702c918ccfd5d947ac268f0f0cc6ed71782e4b09353b26f"}, + {file 
= "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:005655cabc29081de8243126e036f2065bd7ea5b9dff95fde6d2c642d39755de"}, + {file = "pydantic-1.10.15-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:af9850d98fc21e5bc24ea9e35dd80a29faf6462c608728a110c0a30b595e58b7"}, + {file = "pydantic-1.10.15-cp37-cp37m-win_amd64.whl", hash = "sha256:d31ee5b14a82c9afe2bd26aaa405293d4237d0591527d9129ce36e58f19f95c1"}, + {file = "pydantic-1.10.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5e09c19df304b8123938dc3c53d3d3be6ec74b9d7d0d80f4f4b5432ae16c2022"}, + {file = "pydantic-1.10.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7ac9237cd62947db00a0d16acf2f3e00d1ae9d3bd602b9c415f93e7a9fc10528"}, + {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:584f2d4c98ffec420e02305cf675857bae03c9d617fcfdc34946b1160213a948"}, + {file = "pydantic-1.10.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbc6989fad0c030bd70a0b6f626f98a862224bc2b1e36bfc531ea2facc0a340c"}, + {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d573082c6ef99336f2cb5b667b781d2f776d4af311574fb53d908517ba523c22"}, + {file = "pydantic-1.10.15-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6bd7030c9abc80134087d8b6e7aa957e43d35714daa116aced57269a445b8f7b"}, + {file = "pydantic-1.10.15-cp38-cp38-win_amd64.whl", hash = "sha256:3350f527bb04138f8aff932dc828f154847fbdc7a1a44c240fbfff1b57f49a12"}, + {file = "pydantic-1.10.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:51d405b42f1b86703555797270e4970a9f9bd7953f3990142e69d1037f9d9e51"}, + {file = "pydantic-1.10.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a980a77c52723b0dc56640ced396b73a024d4b74f02bcb2d21dbbac1debbe9d0"}, + {file = "pydantic-1.10.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67f1a1fb467d3f49e1708a3f632b11c69fccb4e748a325d5a491ddc7b5d22383"}, + {file = 
"pydantic-1.10.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:676ed48f2c5bbad835f1a8ed8a6d44c1cd5a21121116d2ac40bd1cd3619746ed"}, + {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:92229f73400b80c13afcd050687f4d7e88de9234d74b27e6728aa689abcf58cc"}, + {file = "pydantic-1.10.15-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2746189100c646682eff0bce95efa7d2e203420d8e1c613dc0c6b4c1d9c1fde4"}, + {file = "pydantic-1.10.15-cp39-cp39-win_amd64.whl", hash = "sha256:394f08750bd8eaad714718812e7fab615f873b3cdd0b9d84e76e51ef3b50b6b7"}, + {file = "pydantic-1.10.15-py3-none-any.whl", hash = "sha256:28e552a060ba2740d0d2aabe35162652c1459a0b9069fe0db7f4ee0e18e74d58"}, + {file = "pydantic-1.10.15.tar.gz", hash = "sha256:ca832e124eda231a60a041da4f013e3ff24949d94a01154b137fc2f2a43c3ffb"}, ] [package.dependencies] @@ -1214,93 +1214,93 @@ extra = ["pygments (>=2.12)"] [[package]] name = "pymongo" -version = "4.6.1" +version = "4.6.3" description = "Python driver for MongoDB " optional = false python-versions = ">=3.7" files = [ - {file = "pymongo-4.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4344c30025210b9fa80ec257b0e0aab5aa1d5cca91daa70d82ab97b482cc038e"}, - {file = "pymongo-4.6.1-cp310-cp310-manylinux1_i686.whl", hash = "sha256:1c5654bb8bb2bdb10e7a0bc3c193dd8b49a960b9eebc4381ff5a2043f4c3c441"}, - {file = "pymongo-4.6.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:eaf2f65190c506def2581219572b9c70b8250615dc918b3b7c218361a51ec42e"}, - {file = "pymongo-4.6.1-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:262356ea5fcb13d35fb2ab6009d3927bafb9504ef02339338634fffd8a9f1ae4"}, - {file = "pymongo-4.6.1-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:2dd2f6960ee3c9360bed7fb3c678be0ca2d00f877068556785ec2eb6b73d2414"}, - {file = "pymongo-4.6.1-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:ff925f1cca42e933376d09ddc254598f8c5fcd36efc5cac0118bb36c36217c41"}, - {file 
= "pymongo-4.6.1-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:3cadf7f4c8e94d8a77874b54a63c80af01f4d48c4b669c8b6867f86a07ba994f"}, - {file = "pymongo-4.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55dac73316e7e8c2616ba2e6f62b750918e9e0ae0b2053699d66ca27a7790105"}, - {file = "pymongo-4.6.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:154b361dcb358ad377d5d40df41ee35f1cc14c8691b50511547c12404f89b5cb"}, - {file = "pymongo-4.6.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2940aa20e9cc328e8ddeacea8b9a6f5ddafe0b087fedad928912e787c65b4909"}, - {file = "pymongo-4.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:010bc9aa90fd06e5cc52c8fac2c2fd4ef1b5f990d9638548dde178005770a5e8"}, - {file = "pymongo-4.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e470fa4bace5f50076c32f4b3cc182b31303b4fefb9b87f990144515d572820b"}, - {file = "pymongo-4.6.1-cp310-cp310-win32.whl", hash = "sha256:da08ea09eefa6b960c2dd9a68ec47949235485c623621eb1d6c02b46765322ac"}, - {file = "pymongo-4.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:13d613c866f9f07d51180f9a7da54ef491d130f169e999c27e7633abe8619ec9"}, - {file = "pymongo-4.6.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6a0ae7a48a6ef82ceb98a366948874834b86c84e288dbd55600c1abfc3ac1d88"}, - {file = "pymongo-4.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bd94c503271e79917b27c6e77f7c5474da6930b3fb9e70a12e68c2dff386b9a"}, - {file = "pymongo-4.6.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2d4ccac3053b84a09251da8f5350bb684cbbf8c8c01eda6b5418417d0a8ab198"}, - {file = "pymongo-4.6.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:349093675a2d3759e4fb42b596afffa2b2518c890492563d7905fac503b20daa"}, - {file = 
"pymongo-4.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88beb444fb438385e53dc9110852910ec2a22f0eab7dd489e827038fdc19ed8d"}, - {file = "pymongo-4.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8e62d06e90f60ea2a3d463ae51401475568b995bafaffd81767d208d84d7bb1"}, - {file = "pymongo-4.6.1-cp311-cp311-win32.whl", hash = "sha256:5556e306713e2522e460287615d26c0af0fe5ed9d4f431dad35c6624c5d277e9"}, - {file = "pymongo-4.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:b10d8cda9fc2fcdcfa4a000aa10413a2bf8b575852cd07cb8a595ed09689ca98"}, - {file = "pymongo-4.6.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b435b13bb8e36be11b75f7384a34eefe487fe87a6267172964628e2b14ecf0a7"}, - {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e438417ce1dc5b758742e12661d800482200b042d03512a8f31f6aaa9137ad40"}, - {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b47ebd89e69fbf33d1c2df79759d7162fc80c7652dacfec136dae1c9b3afac7"}, - {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bbed8cccebe1169d45cedf00461b2842652d476d2897fd1c42cf41b635d88746"}, - {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c30a9e06041fbd7a7590693ec5e407aa8737ad91912a1e70176aff92e5c99d20"}, - {file = "pymongo-4.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8729dbf25eb32ad0dc0b9bd5e6a0d0b7e5c2dc8ec06ad171088e1896b522a74"}, - {file = "pymongo-4.6.1-cp312-cp312-win32.whl", hash = "sha256:3177f783ae7e08aaf7b2802e0df4e4b13903520e8380915e6337cdc7a6ff01d8"}, - {file = "pymongo-4.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:00c199e1c593e2c8b033136d7a08f0c376452bac8a896c923fcd6f419e07bdd2"}, - {file = "pymongo-4.6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:6dcc95f4bb9ed793714b43f4f23a7b0c57e4ef47414162297d6f650213512c19"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:13552ca505366df74e3e2f0a4f27c363928f3dff0eef9f281eb81af7f29bc3c5"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:77e0df59b1a4994ad30c6d746992ae887f9756a43fc25dec2db515d94cf0222d"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3a7f02a58a0c2912734105e05dedbee4f7507e6f1bd132ebad520be0b11d46fd"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:026a24a36394dc8930cbcb1d19d5eb35205ef3c838a7e619e04bd170713972e7"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:3b287e814a01deddb59b88549c1e0c87cefacd798d4afc0c8bd6042d1c3d48aa"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:9a710c184ba845afb05a6f876edac8f27783ba70e52d5eaf939f121fc13b2f59"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:30b2c9caf3e55c2e323565d1f3b7e7881ab87db16997dc0cbca7c52885ed2347"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff62ba8ff70f01ab4fe0ae36b2cb0b5d1f42e73dfc81ddf0758cd9f77331ad25"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:547dc5d7f834b1deefda51aedb11a7af9c51c45e689e44e14aa85d44147c7657"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1de3c6faf948f3edd4e738abdb4b76572b4f4fdfc1fed4dad02427e70c5a6219"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2831e05ce0a4df10c4ac5399ef50b9a621f90894c2a4d2945dc5658765514ed"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:144a31391a39a390efce0c5ebcaf4bf112114af4384c90163f402cec5ede476b"}, - {file = 
"pymongo-4.6.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33bb16a07d3cc4e0aea37b242097cd5f7a156312012455c2fa8ca396953b11c4"}, - {file = "pymongo-4.6.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b7b1a83ce514700276a46af3d9e481ec381f05b64939effc9065afe18456a6b9"}, - {file = "pymongo-4.6.1-cp37-cp37m-win32.whl", hash = "sha256:3071ec998cc3d7b4944377e5f1217c2c44b811fae16f9a495c7a1ce9b42fb038"}, - {file = "pymongo-4.6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2346450a075625c4d6166b40a013b605a38b6b6168ce2232b192a37fb200d588"}, - {file = "pymongo-4.6.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:061598cbc6abe2f382ab64c9caa83faa2f4c51256f732cdd890bcc6e63bfb67e"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:d483793a384c550c2d12cb794ede294d303b42beff75f3b3081f57196660edaf"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:f9756f1d25454ba6a3c2f1ef8b7ddec23e5cdeae3dc3c3377243ae37a383db00"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:1ed23b0e2dac6f84f44c8494fbceefe6eb5c35db5c1099f56ab78fc0d94ab3af"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:3d18a9b9b858ee140c15c5bfcb3e66e47e2a70a03272c2e72adda2482f76a6ad"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:c258dbacfff1224f13576147df16ce3c02024a0d792fd0323ac01bed5d3c545d"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:f7acc03a4f1154ba2643edeb13658d08598fe6e490c3dd96a241b94f09801626"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:76013fef1c9cd1cd00d55efde516c154aa169f2bf059b197c263a255ba8a9ddf"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f0e6a6c807fa887a0c51cc24fe7ea51bb9e496fe88f00d7930063372c3664c3"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:dd1fa413f8b9ba30140de198e4f408ffbba6396864c7554e0867aa7363eb58b2"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d219b4508f71d762368caec1fc180960569766049bbc4d38174f05e8ef2fe5b"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27b81ecf18031998ad7db53b960d1347f8f29e8b7cb5ea7b4394726468e4295e"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56816e43c92c2fa8c11dc2a686f0ca248bea7902f4a067fa6cbc77853b0f041e"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef801027629c5b511cf2ba13b9be29bfee36ae834b2d95d9877818479cdc99ea"}, - {file = "pymongo-4.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d4c2be9760b112b1caf649b4977b81b69893d75aa86caf4f0f398447be871f3c"}, - {file = "pymongo-4.6.1-cp38-cp38-win32.whl", hash = "sha256:39d77d8bbb392fa443831e6d4ae534237b1f4eee6aa186f0cdb4e334ba89536e"}, - {file = "pymongo-4.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:4497d49d785482cc1a44a0ddf8830b036a468c088e72a05217f5b60a9e025012"}, - {file = "pymongo-4.6.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:69247f7a2835fc0984bbf0892e6022e9a36aec70e187fcfe6cae6a373eb8c4de"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:7bb0e9049e81def6829d09558ad12d16d0454c26cabe6efc3658e544460688d9"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:6a1810c2cbde714decf40f811d1edc0dae45506eb37298fd9d4247b8801509fe"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:e2aced6fb2f5261b47d267cb40060b73b6527e64afe54f6497844c9affed5fd0"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:d0355cff58a4ed6d5e5f6b9c3693f52de0784aa0c17119394e2a8e376ce489d4"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux2014_ppc64le.whl", hash = 
"sha256:3c74f4725485f0a7a3862cfd374cc1b740cebe4c133e0c1425984bcdcce0f4bb"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:9c79d597fb3a7c93d7c26924db7497eba06d58f88f58e586aa69b2ad89fee0f8"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:8ec75f35f62571a43e31e7bd11749d974c1b5cd5ea4a8388725d579263c0fdf6"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5e641f931c5cd95b376fd3c59db52770e17bec2bf86ef16cc83b3906c054845"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9aafd036f6f2e5ad109aec92f8dbfcbe76cff16bad683eb6dd18013739c0b3ae"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f2b856518bfcfa316c8dae3d7b412aecacf2e8ba30b149f5eb3b63128d703b9"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec31adc2e988fd7db3ab509954791bbc5a452a03c85e45b804b4bfc31fa221d"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9167e735379ec43d8eafa3fd675bfbb12e2c0464f98960586e9447d2cf2c7a83"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1461199b07903fc1424709efafe379205bf5f738144b1a50a08b0396357b5abf"}, - {file = "pymongo-4.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3094c7d2f820eecabadae76bfec02669567bbdd1730eabce10a5764778564f7b"}, - {file = "pymongo-4.6.1-cp39-cp39-win32.whl", hash = "sha256:c91ea3915425bd4111cb1b74511cdc56d1d16a683a48bf2a5a96b6a6c0f297f7"}, - {file = "pymongo-4.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:ef102a67ede70e1721fe27f75073b5314911dbb9bc27cde0a1c402a11531e7bd"}, - {file = "pymongo-4.6.1.tar.gz", hash = "sha256:31dab1f3e1d0cdd57e8df01b645f52d43cc1b653ed3afd535d2891f4fc4f9712"}, + {file = 
"pymongo-4.6.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e344d0afdd7c06c1f1e66a4736593293f432defc2191e6b411fc9c82fa8c5adc"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux1_i686.whl", hash = "sha256:731a92dfc4022db763bfa835c6bd160f2d2cba6ada75749c2ed500e13983414b"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:c4726e36a2f7e92f09f5b8e92ba4db7525daffe31a0dcbcf0533edc0ade8c7d8"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:00e6cfce111883ca63a3c12878286e0b89871f4b840290e61fb6f88ee0e687be"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:cc7a26edf79015c58eea46feb5b262cece55bc1d4929a8a9e0cbe7e6d6a9b0eb"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:4955be64d943b30f2a7ff98d818ca530f7cb37450bc6b32c37e0e74821907ef8"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:af039afc6d787502c02089759778b550cb2f25dbe2780f5b050a2e37031c3fbf"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccc15a7c7a99aed7d0831eaf78a607f1db0c7a255f96e3d18984231acd72f70c"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8e97c138d811e9367723fcd07c4402a9211caae20479fdd6301d57762778a69f"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ebcc145c74d06296ce0cad35992185064e5cb2aadef719586778c144f0cd4d37"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:664c64b6bdb31aceb80f0556951e5e2bf50d359270732268b4e7af00a1cf5d6c"}, + {file = "pymongo-4.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4056bc421d4df2c61db4e584415f2b0f1eebb92cbf9222f7f38303467c37117"}, + {file = "pymongo-4.6.3-cp310-cp310-win32.whl", hash = 
"sha256:cdbea2aac1a4caa66ee912af3601557d2bda2f9f69feec83601c78c7e53ece64"}, + {file = "pymongo-4.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:6cec7279e5a1b74b257d0270a8c97943d745811066630a6bc6beb413c68c6a33"}, + {file = "pymongo-4.6.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:138b9fa18d40401c217bc038a48bcde4160b02d36d8632015b1804971a2eaa2f"}, + {file = "pymongo-4.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60931b0e07448afe8866ffff764cd5bf4b1a855dc84c7dcb3974c6aa6a377a59"}, + {file = "pymongo-4.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9b35f8bded43ff91475305445fedf0613f880ff7e25c75ae1028e1260a9b7a86"}, + {file = "pymongo-4.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:872bad5c83f7eec9da11e1fef5f858c6a4c79fe4a83c7780e7b0fe95d560ae3f"}, + {file = "pymongo-4.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2ad3e5bfcd345c0bfe9af69a82d720860b5b043c1657ffb513c18a0dee19c19"}, + {file = "pymongo-4.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e208f2ab7b495eff8fd175022abfb0abce6307ac5aee3f4de51fc1a459b71c9"}, + {file = "pymongo-4.6.3-cp311-cp311-win32.whl", hash = "sha256:4670edbb5ddd71a4d555668ef99b032a5f81b59e4145d66123aa0d831eac7883"}, + {file = "pymongo-4.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:1c2761302b6cbfd12e239ce1b8061d4cf424a361d199dcb32da534985cae9350"}, + {file = "pymongo-4.6.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:722f2b709b63311c0efda4fa4c603661faa4bec6bad24a6cc41a3bc6d841bf09"}, + {file = "pymongo-4.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:994386a4d6ad39e18bcede6dc8d1d693ec3ed897b88f86b1841fbc37227406da"}, + {file = "pymongo-4.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:391aea047bba928006114282f175bc8d09c53fe1b7d8920bf888325e229302fe"}, + 
{file = "pymongo-4.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4330c022024e7994b630199cdae909123e4b0e9cf15335de71b146c0f6a2435"}, + {file = "pymongo-4.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01277a7e183c59081368e4efbde2b8f577014431b257959ca98d3a4e8682dd51"}, + {file = "pymongo-4.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d30d5d7963453b478016bf7b0d87d7089ca24d93dbdecfbc9aa32f1b4772160a"}, + {file = "pymongo-4.6.3-cp312-cp312-win32.whl", hash = "sha256:a023804a3ac0f85d4510265b60978522368b5815772262e61e3a2222a8b315c9"}, + {file = "pymongo-4.6.3-cp312-cp312-win_amd64.whl", hash = "sha256:2a6ae9a600bbc2dbff719c98bf5da584fb8a4f2bb23729a09be2e9c3dbc61c8a"}, + {file = "pymongo-4.6.3-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:3b909e5b1864de01510079b39bbdc480720c37747be5552b354bc73f02c24a3c"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:48c60bd32ec141c0d45d8471179430003d9fb4490da181b8165fb1dce9cc255c"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:36d7049fc183fe4edda3eae7f66ea14c660921429e082fe90b4b7f4dc6664a70"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:18e5c161b18660f1c9d1f78236de45520a436be65e42b7bb51f25f74ad22bdde"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:e458e6fc2b7dd40d15cda04898bd2d8c9ff7ae086c516bc261628d54eb4e3158"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:e420e74c6db4594a6d09f39b58c0772679006cb0b4fc40901ba608794d87dad2"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:9c9340c7161e112e36ebb97fbba1cdbe7db3dfacb694d2918b1f155a01f3d859"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:26d036e0f5de09d0b21d0fc30314fcf2ae6359e4d43ae109aa6cf27b4ce02d30"}, + {file = 
"pymongo-4.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7cf28d9c90e40d4e385b858e4095739829f466f23e08674085161d86bb4bb10"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9066dff9dc0a182478ca5885d0b8a2b820b462e19459ada109df7a3ced31b272"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1e1586ebdebe0447a24842480defac17c496430a218486c96e2da3f164c0f05"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b3853fb66bf34ce1b6e573e1bbb3cb28763be9d1f57758535757faf1ab2f24a"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:462684a6f5ce6f2661c30eab4d1d459231e0eed280f338e716e31a24fc09ccb3"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a4ea44e5a913bdb7c9abd34c69e9fcfac10dfaf49765463e0dc1ea922dd2a9d"}, + {file = "pymongo-4.6.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:098d420a8214ad25f872de7e8b309441995d12ece0376218a04d9ed5d2222cf3"}, + {file = "pymongo-4.6.3-cp37-cp37m-win32.whl", hash = "sha256:7330245253fbe2e09845069d2f4d35dd27f63e377034c94cb0ddac18bc8b0d82"}, + {file = "pymongo-4.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:151361c101600a85cb1c1e0db4e4b28318b521fcafa9b62d389f7342faaaee80"}, + {file = "pymongo-4.6.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:4d167d546352869125dc86f6fda6dffc627d8a9c8963eaee665825f2520d542b"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:eaf3d594ebfd5e1f3503d81e06a5d78e33cda27418b36c2491c3d4ad4fca5972"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7ee79e02a7c5ed34706ecb5dad19e6c7d267cf86d28c075ef3127c58f3081279"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux2014_aarch64.whl", hash = 
"sha256:af5c5112db04cf62a5d9d224a24f289aaecb47d152c08a457cca81cee061d5bd"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:6b5aec78aa4840e8d6c3881900259892ab5733a366696ca10d99d68c3d73eaaf"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:9757602fb45c8ecc1883fe6db7c59c19d87eb3c645ec9342d28a6026837da931"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:dde9fb6e105ce054339256a8b7a9775212ebb29596ef4e402d7bbc63b354d202"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:7df8b166d3db6cfead4cf55b481408d8f0935d8bd8d6dbf64507c49ef82c7200"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53451190b8628e1ce7d1fe105dc376c3f10705127bd3b51fe3e107b9ff1851e6"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:75107a386d4ccf5291e75cce8ca3898430e7907f4cc1208a17c9efad33a1ea84"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4a0660ce32d8459b7f12dc3ca0141528fead62d3cce31b548f96f30902074cc0"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa310096450e9c461b7dfd66cbc1c41771fe36c06200440bb3e062b1d4a06b6e"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5f465cca9b178e7bb782f952dd58e9e92f8ba056e585959465f2bb50feddef5f"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c67c19f653053ef2ebd7f1837c2978400058d6d7f66ec5760373a21eaf660158"}, + {file = "pymongo-4.6.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c701de8e483fb5e53874aab642235361aac6de698146b02c644389eaa8c137b6"}, + {file = "pymongo-4.6.3-cp38-cp38-win32.whl", hash = "sha256:90525454546536544307e6da9c81f331a71a1b144e2d038fec587cc9f9250285"}, + {file = 
"pymongo-4.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:3e1ba5a037c526a3f4060c28f8d45d71ed9626e2bf954b0cd9a8dcc3b45172ee"}, + {file = "pymongo-4.6.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:14a82593528cddc93cfea5ee78fac95ae763a3a4e124ca79ee0b24fbbc6da1c9"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:cd6c15242d9306ff1748681c3235284cbe9f807aeaa86cd17d85e72af626e9a7"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:6de33f1b2eed91b802ec7abeb92ffb981d052f3604b45588309aae9e0f6e3c02"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:0182899aafe830f25cf96c5976d724efeaaf7b6646c15424ad8dd25422b2efe1"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:8d0ea740a2faa56f930dc82c5976d96c017ece26b29a1cddafb58721c7aab960"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:5c8a4982f5eb767c6fbfb8fb378683d09bcab7c3251ba64357eef600d43f6c23"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:becfa816545a48c8e740ac2fd624c1c121e1362072d68ffcf37a6b1be8ea187e"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:ff7d1f449fcad23d9bc8e8dc2b9972be38bcd76d99ea5f7d29b2efa929c2a7ff"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e097f877de4d6af13a33ef938bf2a2350f424be5deabf8b857da95f5b080487a"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:705a9bfd619301ee7e985d6f91f68b15dfcb2f6f36b8cc225cc82d4260d2bce5"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ef1b4992ee1cb8bb16745e70afa0c02c5360220a7a8bb4775888721f052d0a6"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3d10bdd46cbc35a2109737d36ffbef32e7420569a87904738ad444ccb7ac2c5"}, + {file = 
"pymongo-4.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17c1c143ba77d6e21fc8b48e93f0a5ed982a23447434e9ee4fbb6d633402506b"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e51e30d67b468a2a634ade928b30cb3e420127f148a9aec60de33f39087bdc4"}, + {file = "pymongo-4.6.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:bec8e4e88984be157408f1923d25869e1b575c07711cdbdde596f66931800934"}, + {file = "pymongo-4.6.3-cp39-cp39-win32.whl", hash = "sha256:98877a9c4ad42df8253a12d8d17a3265781d1feb5c91c767bd153f88feb0b670"}, + {file = "pymongo-4.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:6d5b35da9e16cda630baed790ffc3d0d01029d269523a7cec34d2ec7e6823e75"}, + {file = "pymongo-4.6.3.tar.gz", hash = "sha256:400074090b9a631f120b42c61b222fd743490c133a5d2f99c0208cefcccc964e"}, ] [package.dependencies] @@ -1392,13 +1392,13 @@ resolved_reference = "4891556e7db831a5a9b27d4bad8ff102609b2a2c" [[package]] name = "pytest" -version = "8.0.0" +version = "8.1.1" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.0.0-py3-none-any.whl", hash = "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6"}, - {file = "pytest-8.0.0.tar.gz", hash = "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c"}, + {file = "pytest-8.1.1-py3-none-any.whl", hash = "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7"}, + {file = "pytest-8.1.1.tar.gz", hash = "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"}, ] [package.dependencies] @@ -1406,21 +1406,21 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=1.3.0,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} +pluggy = 
">=1.4,<2.0" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +testing = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-cov" -version = "4.1.0" +version = "5.0.0" description = "Pytest plugin for measuring coverage." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, - {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, + {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, + {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, ] [package.dependencies] @@ -1428,7 +1428,7 @@ coverage = {version = ">=5.2.1", extras = ["toml"]} pytest = ">=4.6" [package.extras] -testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "python-dateutil" @@ -1681,13 +1681,13 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "requests-cache" -version = "1.1.1" +version = "1.2.0" description = "A persistent cache for python requests" optional = false -python-versions = ">=3.7,<4.0" +python-versions = ">=3.8" files = [ - {file = "requests_cache-1.1.1-py3-none-any.whl", hash = "sha256:c8420cf096f3aafde13c374979c21844752e2694ffd8710e6764685bb577ac90"}, - {file = "requests_cache-1.1.1.tar.gz", hash = "sha256:764f93d3fa860be72125a568c2cc8eafb151cf29b4dc2515433a56ee657e1c60"}, + {file = 
"requests_cache-1.2.0-py3-none-any.whl", hash = "sha256:490324301bf0cb924ff4e6324bd2613453e7e1f847353928b08adb0fdfb7f722"}, + {file = "requests_cache-1.2.0.tar.gz", hash = "sha256:db1c709ca343cc1cd5b6c8b1a5387298eceed02306a6040760db538c885e3838"}, ] [package.dependencies] @@ -1699,43 +1699,43 @@ url-normalize = ">=1.4" urllib3 = ">=1.25.5" [package.extras] -all = ["boto3 (>=1.15)", "botocore (>=1.18)", "itsdangerous (>=2.0)", "pymongo (>=3)", "pyyaml (>=5.4)", "redis (>=3)", "ujson (>=5.4)"] +all = ["boto3 (>=1.15)", "botocore (>=1.18)", "itsdangerous (>=2.0)", "pymongo (>=3)", "pyyaml (>=6.0.1)", "redis (>=3)", "ujson (>=5.4)"] bson = ["bson (>=0.5)"] -docs = ["furo (>=2023.3,<2024.0)", "linkify-it-py (>=2.0,<3.0)", "myst-parser (>=1.0,<2.0)", "sphinx (>=5.0.2,<6.0.0)", "sphinx-autodoc-typehints (>=1.19)", "sphinx-automodapi (>=0.14)", "sphinx-copybutton (>=0.5)", "sphinx-design (>=0.2)", "sphinx-notfound-page (>=0.8)", "sphinxcontrib-apidoc (>=0.3)", "sphinxext-opengraph (>=0.6)"] +docs = ["furo (>=2023.3,<2024.0)", "linkify-it-py (>=2.0,<3.0)", "myst-parser (>=1.0,<2.0)", "sphinx (>=5.0.2,<6.0.0)", "sphinx-autodoc-typehints (>=1.19)", "sphinx-automodapi (>=0.14)", "sphinx-copybutton (>=0.5)", "sphinx-design (>=0.2)", "sphinx-notfound-page (>=0.8)", "sphinxcontrib-apidoc (>=0.3)", "sphinxext-opengraph (>=0.9)"] dynamodb = ["boto3 (>=1.15)", "botocore (>=1.18)"] json = ["ujson (>=5.4)"] mongodb = ["pymongo (>=3)"] redis = ["redis (>=3)"] security = ["itsdangerous (>=2.0)"] -yaml = ["pyyaml (>=5.4)"] +yaml = ["pyyaml (>=6.0.1)"] [[package]] name = "requests-ratelimiter" -version = "0.4.2" +version = "0.6.0" description = "Rate-limiting for the requests library" optional = false python-versions = ">=3.7,<4.0" files = [ - {file = "requests_ratelimiter-0.4.2-py3-none-any.whl", hash = "sha256:1cf68b02cca692f8dfa8eca12653a3ee177bea3208f8dd7533bb52a3ab14d16c"}, - {file = "requests_ratelimiter-0.4.2.tar.gz", hash = 
"sha256:7b804c9484617e3e7c372eba81b9dedfe6d34904d1ceb8b7c4a4f3b864dc0dbd"}, + {file = "requests_ratelimiter-0.6.0-py3-none-any.whl", hash = "sha256:0641ec7b3dd919a64a8e390358d416c7369f89eb61bec0cf6113ea8d2ec4f072"}, + {file = "requests_ratelimiter-0.6.0.tar.gz", hash = "sha256:ad72f033323df3c1012a3ac4bb91be36606741f58ec9faede62b89b475ea99f6"}, ] [package.dependencies] -pyrate-limiter = ">=2.8,<3.0" +pyrate-limiter = "<3.0" requests = ">=2.20" [package.extras] -docs = ["furo (>=2022.12,<2023.0)", "myst-parser (>=0.17)", "sphinx (>=5.2,<6.0)", "sphinx-autodoc-typehints (>=1.22,<2.0)", "sphinx-copybutton (>=0.5)"] +docs = ["furo (>=2023.3,<2024.0)", "myst-parser (>=1.0)", "sphinx (>=5.2,<6.0)", "sphinx-autodoc-typehints (>=1.22,<2.0)", "sphinx-copybutton (>=0.5)"] [[package]] name = "ruamel-yaml" -version = "0.18.5" +version = "0.18.6" description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" optional = false python-versions = ">=3.7" files = [ - {file = "ruamel.yaml-0.18.5-py3-none-any.whl", hash = "sha256:a013ac02f99a69cdd6277d9664689eb1acba07069f912823177c5eced21a6ada"}, - {file = "ruamel.yaml-0.18.5.tar.gz", hash = "sha256:61917e3a35a569c1133a8f772e1226961bf5a1198bea7e23f06a0841dea1ab0e"}, + {file = "ruamel.yaml-0.18.6-py3-none-any.whl", hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636"}, + {file = "ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b"}, ] [package.dependencies] @@ -2139,4 +2139,4 @@ testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>= [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "b3fa129675c8c3082e6cdc84e3019b9130a7b359f021baf714e8393c1a171dc1" +content-hash = "e292c76c15cb1f537bdd38d763f4c809c54e59f07496b422e8bb796f9a0d981f" diff --git a/pyproject.toml b/pyproject.toml index 92018c707..afae64c34 100644 --- a/pyproject.toml 
+++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "splunk-connect-for-snmp" -version = "1.10.0" +version = "1.11.0-beta.6" description = "" authors = ["omrozowicz-splunk "] license = "Apache-2.0" @@ -33,8 +33,8 @@ opentelemetry-instrumentation-logging = "*" opentelemetry-exporter-jaeger-thrift = "^1.10.0" pyrate-limiter = "^2.10.0" requests-cache = "^1.0.0" -requests-ratelimiter = "^0.4.0" -mongoengine = "^0.27.0" +requests-ratelimiter = "^0.6.0" +mongoengine = "^0.28.0" celery-redbeat = {git = "https://github.com/splunk/redbeat", branch = "main"} PyYAML = "^6.0" #Note this is temporary PR to upstream project is issued @@ -48,7 +48,7 @@ urllib3 = "^1.26.17" [tool.poetry.group.dev.dependencies] pytest = "^8.0.0" -pytest-cov = "^4.0.0" +pytest-cov = "^5.0.0" mike = "^2.0.0" mkdocs = "^1.2.2" mkdocs-material = "^9.0.0" diff --git a/render_manifests.sh b/render_manifests.sh index a6afd3a69..e569d974b 100755 --- a/render_manifests.sh +++ b/render_manifests.sh @@ -6,7 +6,7 @@ prefix="$DIR/values_" suffix=".yaml" declare -a TEST_CASES=() -for file in $prefix*; do +for file in "$prefix"*; do if [ -f "$file" ]; then filename=${file#"$prefix"} # Remove prefix. filename=${filename%"$suffix"} # Remove suffix. 
@@ -18,7 +18,10 @@ for test_case in "${TEST_CASES[@]}"; do VALUES_FILE="$DIR/values_${test_case}.yaml" MANIFEST_DIR="$DIR/manifests/tests_${test_case}" helm template --values "${VALUES_FILE}" --output-dir "${MANIFEST_DIR}" -n default charts/splunk-connect-for-snmp - APPVERSION=`sed -nr 's/appVersion: "(.+)"/\1/p' charts/splunk-connect-for-snmp/Chart.yaml` #Find current app version - sed -i '' -e 's/'"$APPVERSION"'/CURRENT-VERSION/g' $(find rendered/ -type f) #Replace version with default var + APPVERSION="$(sed -nr 's/appVersion: "(.+)"/\1/p' charts/splunk-connect-for-snmp/Chart.yaml)" #Find current app version + FILES=$(find rendered/ -type f) + for f in $FILES; do + sed -i '' -e 's/'"$APPVERSION"'/CURRENT-VERSION/g' "$f" #Replace version with default var + done rm -rf "${MANIFEST_DIR}"/splunk-connect-for-snmp/charts done diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml index 40062ecdb..bbbb3fc46 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -48,6 +48,8 @@ spec: value: "60" - name: CONFIG_FROM_MONGO value: "false" + - name: ENABLE_FULL_WALK + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index ce10d78b0..4b03ed67b 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -105,6 +105,12 @@ spec: value: "4" - name: PREFETCH_COUNT value: "30" + - name: RESOLVE_TRAP_ADDRESS + value: "false" + - name: MAX_DNS_CACHE_SIZE_TRAPS + value: "500" + - name: TTL_DNS_CACHE_TRAPS + 
value: "1800" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml index 40062ecdb..bbbb3fc46 100644 --- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -48,6 +48,8 @@ spec: value: "60" - name: CONFIG_FROM_MONGO value: "false" + - name: ENABLE_FULL_WALK + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index dbd548386..058bde528 100644 --- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -104,6 +104,12 @@ spec: value: "4" - name: PREFETCH_COUNT value: "30" + - name: RESOLVE_TRAP_ADDRESS + value: "false" + - name: MAX_DNS_CACHE_SIZE_TRAPS + value: "500" + - name: TTL_DNS_CACHE_TRAPS + value: "1800" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/inventory/job.yaml index 40062ecdb..bbbb3fc46 100644 --- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -48,6 +48,8 @@ spec: value: "60" - name: 
CONFIG_FROM_MONGO value: "false" + - name: ENABLE_FULL_WALK + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index dbd548386..058bde528 100644 --- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -104,6 +104,12 @@ spec: value: "4" - name: PREFETCH_COUNT value: "30" + - name: RESOLVE_TRAP_ADDRESS + value: "false" + - name: MAX_DNS_CACHE_SIZE_TRAPS + value: "500" + - name: TTL_DNS_CACHE_TRAPS + value: "1800" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/inventory/job.yaml index 3134f724e..6230df48f 100644 --- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -48,6 +48,8 @@ spec: value: "60" - name: CONFIG_FROM_MONGO value: "true" + - name: ENABLE_FULL_WALK + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/configmap-backend.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/configmap-backend.yaml index 78b8c4ba5..87d82d69c 100644 --- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/configmap-backend.yaml +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/ui/configmap-backend.yaml @@ -53,6 +53,8 @@ data: value: INFO 
- name: CONFIG_FROM_MONGO value: "true" + - name: ENABLE_FULL_WALK + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index ce10d78b0..4b03ed67b 100644 --- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -105,6 +105,12 @@ spec: value: "4" - name: PREFETCH_COUNT value: "30" + - name: RESOLVE_TRAP_ADDRESS + value: "false" + - name: MAX_DNS_CACHE_SIZE_TRAPS + value: "500" + - name: TTL_DNS_CACHE_TRAPS + value: "1800" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/inventory/job.yaml index 40062ecdb..bbbb3fc46 100644 --- a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -48,6 +48,8 @@ spec: value: "60" - name: CONFIG_FROM_MONGO value: "false" + - name: ENABLE_FULL_WALK + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index ce10d78b0..4b03ed67b 100644 --- a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -105,6 +105,12 @@ spec: value: "4" - name: PREFETCH_COUNT value: "30" + - name: 
RESOLVE_TRAP_ADDRESS + value: "false" + - name: MAX_DNS_CACHE_SIZE_TRAPS + value: "500" + - name: TTL_DNS_CACHE_TRAPS + value: "1800" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml index 40062ecdb..bbbb3fc46 100644 --- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml +++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/inventory/job.yaml @@ -48,6 +48,8 @@ spec: value: "60" - name: CONFIG_FROM_MONGO value: "false" + - name: ENABLE_FULL_WALK + value: "false" volumeMounts: - name: config mountPath: "/app/config" diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index 6edf0e7bb..cef9e439e 100644 --- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -105,6 +105,12 @@ spec: value: "4" - name: PREFETCH_COUNT value: "30" + - name: RESOLVE_TRAP_ADDRESS + value: "false" + - name: MAX_DNS_CACHE_SIZE_TRAPS + value: "500" + - name: TTL_DNS_CACHE_TRAPS + value: "1800" volumeMounts: - name: config mountPath: "/app/config" diff --git a/splunk_connect_for_snmp/__init__.py b/splunk_connect_for_snmp/__init__.py index 9bb00a9fc..3937f0a08 100644 --- a/splunk_connect_for_snmp/__init__.py +++ b/splunk_connect_for_snmp/__init__.py @@ -15,4 +15,4 @@ # -__version__ = "1.10.0" +__version__ = "1.11.0-beta.6" diff --git a/splunk_connect_for_snmp/common/collection_manager.py b/splunk_connect_for_snmp/common/collection_manager.py index ddaf345d1..f921d6e46 100644 --- 
a/splunk_connect_for_snmp/common/collection_manager.py +++ b/splunk_connect_for_snmp/common/collection_manager.py @@ -35,7 +35,7 @@ def return_collection_once(self): return collection_elements def return_collection(self): - for retry in range(3): + for _ in range(3): collection_elements = self.return_collection_once() if collection_elements: return collection_elements diff --git a/splunk_connect_for_snmp/common/inventory_processor.py b/splunk_connect_for_snmp/common/inventory_processor.py index 5e8c3a4c2..9206315e5 100644 --- a/splunk_connect_for_snmp/common/inventory_processor.py +++ b/splunk_connect_for_snmp/common/inventory_processor.py @@ -1,5 +1,7 @@ import copy +import logging import os +import sys from contextlib import suppress from csv import DictReader from typing import List @@ -29,6 +31,20 @@ "security_engine", "securityEngine", ] +ENABLE_FULL_WALK = human_bool(os.getenv("ENABLE_FULL_WALK", "false").lower()) +from splunk_connect_for_snmp.common.customised_json_formatter import ( + CustomisedJSONFormatter, +) + +formatter = CustomisedJSONFormatter() +logger = logging.getLogger(__name__) +logger.setLevel("DEBUG") + +# writing to stdout +handler = logging.StreamHandler(sys.stdout) +handler.setLevel("DEBUG") +handler.setFormatter(formatter) +logger.addHandler(handler) def transform_key_to_address(target): @@ -88,10 +104,9 @@ def get_groups_keys(list_of_groups, group_name, inventory_group_port_mapping): class InventoryProcessor: - def __init__(self, group_manager: GroupsManager, logger, inventory_ui_collection): + def __init__(self, group_manager: GroupsManager, inventory_ui_collection): self.inventory_records: List[dict] = [] self.group_manager = group_manager - self.logger = logger self.hosts_from_groups: dict = {} self.inventory_group_port_mapping: dict = {} self.single_hosts: List[dict] = [] @@ -99,11 +114,11 @@ def __init__(self, group_manager: GroupsManager, logger, inventory_ui_collection def get_all_hosts(self): if CONFIG_FROM_MONGO: - 
self.logger.info("Loading inventory from inventory_ui collection") + logger.info("Loading inventory from inventory_ui collection") ir_reader = list(self.inventory_ui_collection.find({}, {"_id": 0})) else: with open(INVENTORY_PATH, encoding="utf-8") as csv_file: - self.logger.info(f"Loading inventory from {INVENTORY_PATH}") + logger.info(f"Loading inventory from {INVENTORY_PATH}") ir_reader = list(DictReader(csv_file)) for inventory_line in ir_reader: self.process_line(inventory_line) @@ -115,7 +130,7 @@ def get_all_hosts(self): if was_present is None: self.inventory_records.append(source_record) else: - self.logger.warning( + logger.warning( f"Record: {host} has been already configured in group. Skipping..." ) return self.inventory_records, self.inventory_group_port_mapping @@ -124,7 +139,7 @@ def process_line(self, source_record): address = source_record["address"] # Inventory record is commented out if address.startswith("#"): - self.logger.warning(f"Record: {address} is commented out. Skipping...") + logger.warning(f"Record: {address} is commented out. Skipping...") # Address is an IP address elif address[0].isdigit(): self.single_hosts.append(source_record) @@ -146,7 +161,7 @@ def get_group_hosts(self, source_object, group_name): if key in ALLOWED_KEYS_VALUES: host_group_object[key] = group_object[key] else: - self.logger.warning( + logger.warning( f"Key {key} is not allowed to be changed from the group level" ) address = str(group_object["address"]) @@ -156,19 +171,18 @@ def get_group_hosts(self, source_object, group_name): host_group_object["group"] = group_name self.inventory_records.append(host_group_object) else: - self.logger.warning( + logger.warning( f"Group {group_name} doesn't exist in the configuration. 
Treating {group_name} as a hostname" ) self.single_hosts.append(source_object) class InventoryRecordManager: - def __init__(self, mongo_client, periodic_objects_collection, logger): + def __init__(self, mongo_client, periodic_objects_collection): self.targets_collection = mongo_client.sc4snmp.targets self.inventory_collection = mongo_client.sc4snmp.inventory self.attributes_collection = mongo_client.sc4snmp.attributes self.periodic_object_collection = periodic_objects_collection - self.logger = logger def delete(self, target): address, port = transform_key_to_address(target) @@ -176,7 +190,7 @@ def delete(self, target): self.inventory_collection.delete_one({"address": address, "port": port}) self.targets_collection.delete_many({"address": target}) self.attributes_collection.delete_many({"address": target}) - self.logger.info(f"Deleting record: {target}") + logger.info(f"Deleting record: {target}") def update( self, inventory_record, new_source_record, runtime_profiles, expiry_time_changed @@ -191,13 +205,13 @@ def update( upsert=True, ) if status.matched_count == 0: - self.logger.info(f"New Record {inventory_record} {status.upserted_id}") + logger.info(f"New Record {inventory_record} {status.upserted_id}") elif status.modified_count == 1 and status.upserted_id is None: - self.logger.info(f"Modified Record {inventory_record}") + logger.info(f"Modified Record {inventory_record}") else: - self.logger.info(f"Unchanged Record {inventory_record}") + logger.info(f"Unchanged Record {inventory_record}") if expiry_time_changed: - self.logger.info( + logger.info( f"Task expiry time was modified, generating new tasks for record {inventory_record}" ) else: @@ -211,6 +225,8 @@ def update( def return_walk_profile(self, runtime_profiles, inventory_profiles): walk_profile = None + if ENABLE_FULL_WALK: + return None if inventory_profiles: walk_profiles = [ p @@ -221,4 +237,6 @@ def return_walk_profile(self, runtime_profiles, inventory_profiles): if walk_profiles: # if there's more 
than one walk profile, we're choosing the last one on the list walk_profile = walk_profiles[-1] + if not walk_profile: + walk_profile = "WalkProfile" return walk_profile diff --git a/splunk_connect_for_snmp/common/inventory_record.py b/splunk_connect_for_snmp/common/inventory_record.py index 37e628bc3..41bcdc47c 100644 --- a/splunk_connect_for_snmp/common/inventory_record.py +++ b/splunk_connect_for_snmp/common/inventory_record.py @@ -94,22 +94,8 @@ def version_validator(cls, value): ) return value - @validator("community", pre=True) - def community_validator(cls, value): - if value is None or (isinstance(value, str) and value.strip() == ""): - return None - else: - return value - - @validator("secret", pre=True) - def secret_validator(cls, value): - if value is None or (isinstance(value, str) and value.strip() == ""): - return None - else: - return value - - @validator("security_engine", pre=True) - def security_engine_validator(cls, value): + @validator("community", "secret", "security_engine", pre=True) + def community_secret_security_engine_validator(cls, value): if value is None or (isinstance(value, str) and value.strip() == ""): return None else: diff --git a/splunk_connect_for_snmp/common/schema_migration.py b/splunk_connect_for_snmp/common/schema_migration.py index 95b47ef1a..91b52f243 100644 --- a/splunk_connect_for_snmp/common/schema_migration.py +++ b/splunk_connect_for_snmp/common/schema_migration.py @@ -90,7 +90,7 @@ def migrate_to_version_2(mongo_client, task_manager): task_manager.rerun_all_walks() -def migrate_to_version_3(mongo_client, task_manager): +def migrate_to_version_3(mongo_client, _): logger.info("Migrating database schema to version 3") attributes_collection = mongo_client.sc4snmp.attributes @@ -106,7 +106,7 @@ def migrate_to_version_4(mongo_client, task_manager): schedules_collection.drop() -def migrate_to_version_5(mongo_client, task_manager): +def migrate_to_version_5(mongo_client, _): logger.info("Migrating database schema to version 
5") inventory_collection = mongo_client.sc4snmp.inventory inventory_collection.update_many({}, {"$set": {"group": None}}) diff --git a/splunk_connect_for_snmp/customtaskmanager.py b/splunk_connect_for_snmp/customtaskmanager.py index f4dd2ee8b..f785db0dd 100644 --- a/splunk_connect_for_snmp/customtaskmanager.py +++ b/splunk_connect_for_snmp/customtaskmanager.py @@ -39,7 +39,7 @@ def __delete_all_tasks_of_type(self, task, function_name): def delete_unused_poll_tasks(self, target: str, activeschedules: List[str]): periodic_tasks = RedBeatSchedulerEntry.get_schedules_by_target(target, app=app) for periodic_document in periodic_tasks: - if not periodic_document.task == "splunk_connect_for_snmp.snmp.tasks.poll": + if periodic_document.task != "splunk_connect_for_snmp.snmp.tasks.poll": continue logger.debug(f"Got Schedule: {periodic_document.name}") periodic_document = RedBeatSchedulerEntry.from_key( @@ -73,7 +73,7 @@ def delete_all_walk_tasks(self): def rerun_all_walks(self): periodic_tasks = RedBeatSchedulerEntry.get_schedules() for periodic_document in periodic_tasks: - if not periodic_document.task == "splunk_connect_for_snmp.snmp.tasks.walk": + if periodic_document.task != "splunk_connect_for_snmp.snmp.tasks.walk": continue periodic_document = RedBeatSchedulerEntry.from_key( f"redbeat:{periodic_document.name}", app=app diff --git a/splunk_connect_for_snmp/enrich/tasks.py b/splunk_connect_for_snmp/enrich/tasks.py index 30c55008f..2901fcc0f 100644 --- a/splunk_connect_for_snmp/enrich/tasks.py +++ b/splunk_connect_for_snmp/enrich/tasks.py @@ -145,7 +145,7 @@ def enrich(self, result): cv = current_attributes["fields"][field_key_hash] # if new field_value is different than the previous one, update - if cv and not cv == field_value: + if cv and cv != field_value: # modifed attribute_updates.append( {"$set": {f"fields.{field_key_hash}": field_value}} diff --git a/splunk_connect_for_snmp/inventory/loader.py b/splunk_connect_for_snmp/inventory/loader.py index 
ef57b21d8..35e19e144 100644 --- a/splunk_connect_for_snmp/inventory/loader.py +++ b/splunk_connect_for_snmp/inventory/loader.py @@ -92,6 +92,7 @@ def configure_ui_database(mongo_client): with open(INVENTORY_PATH, encoding="utf-8") as csv_file: ir_reader = DictReader(csv_file) + all_inventory_lines = [] for inventory_line in ir_reader: for key in INVENTORY_KEYS_TRANSFORM.keys(): if key in inventory_line: @@ -117,7 +118,8 @@ def configure_ui_database(mongo_client): inventory_line["port"] = port inventory_line["walk_interval"] = walk_interval if not inventory_line["address"].startswith("#"): - inventory_ui_collection.insert(inventory_line) + all_inventory_lines.append(inventory_line) + inventory_ui_collection.insert_many(all_inventory_lines) groups = {} all_profiles = {} @@ -183,12 +185,8 @@ def load(): new_groups = groups_manager.return_collection() inventory_ui_collection = mongo_client.sc4snmp.inventory_ui - inventory_processor = InventoryProcessor( - groups_manager, logger, inventory_ui_collection - ) - inventory_record_manager = InventoryRecordManager( - mongo_client, periodic_obj, logger - ) + inventory_processor = InventoryProcessor(groups_manager, inventory_ui_collection) + inventory_record_manager = InventoryRecordManager(mongo_client, periodic_obj) if CONFIG_FROM_MONGO: logger.info(f"Loading inventory from inventory_ui collection") else: diff --git a/splunk_connect_for_snmp/inventory/tasks.py b/splunk_connect_for_snmp/inventory/tasks.py index dccee4dbe..d55ad3a5e 100644 --- a/splunk_connect_for_snmp/inventory/tasks.py +++ b/splunk_connect_for_snmp/inventory/tasks.py @@ -171,25 +171,22 @@ def assign_profiles(ir, profiles, target): elif profile["condition"]["type"] == "field": logger.debug(f"profile is a field condition {profile_name}") - if "state" in target: - if ( + if "state" in target and ( + profile["condition"]["field"].replace(".", "|") in target["state"] + ): + cs = target["state"][ profile["condition"]["field"].replace(".", "|") - in target["state"] 
- ): - cs = target["state"][ - profile["condition"]["field"].replace(".", "|") - ] - if "value" in cs: - for pattern in profile["condition"]["patterns"]: - result = re.search(pattern, cs["value"]) - if result: - logger.debug(f"Adding smart profile {profile_name}") - add_profile_to_assigned_list( - assigned_profiles, - profile["frequency"], - profile_name, - ) - continue + ] + if "value" in cs: + for pattern in profile["condition"]["patterns"]: + result = re.search(pattern, cs["value"]) + if result: + logger.debug(f"Adding smart profile {profile_name}") + add_profile_to_assigned_list( + assigned_profiles, + profile["frequency"], + profile_name, + ) logger.debug(f"ir.profiles {ir.profiles}") logger.debug(f"profiles {profiles}") @@ -392,7 +389,7 @@ def generate_conditional_profile( profile_varbinds = conditional_profile_body.get("varBinds") profile_frequency = conditional_profile_body.get("frequency") if not profile_varbinds: - raise BadlyFormattedFieldError(f"No varBinds provided in the profile") + raise BadlyFormattedFieldError("No varBinds provided in the profile") filtered_snmp_objects = filter_condition_on_database( mongo_client, address, profile_conditions ) diff --git a/splunk_connect_for_snmp/poller.py b/splunk_connect_for_snmp/poller.py index 20eef65d5..3fb1431df 100644 --- a/splunk_connect_for_snmp/poller.py +++ b/splunk_connect_for_snmp/poller.py @@ -27,17 +27,11 @@ from celery import Celery from celery.utils.log import get_task_logger from opentelemetry import trace - -# from opentelemetry.exporter.jaeger.thrift import JaegerExporter from opentelemetry.sdk.trace import TracerProvider from splunk_connect_for_snmp.celery_signals_handlers import * -# from opentelemetry.sdk.trace.export import BatchSpanProcessor - provider = TracerProvider() -# processor = BatchSpanProcessor(JaegerExporter()) -# provider.add_span_processor(processor) trace.set_tracer_provider(provider) logger = get_task_logger(__name__) diff --git a/splunk_connect_for_snmp/profiles/walk.yaml 
b/splunk_connect_for_snmp/profiles/walk.yaml new file mode 100644 index 000000000..7c701621b --- /dev/null +++ b/splunk_connect_for_snmp/profiles/walk.yaml @@ -0,0 +1,5 @@ +WalkProfile: + condition: + type: "walk" + varBinds: + - ['SNMPv2-MIB'] \ No newline at end of file diff --git a/splunk_connect_for_snmp/snmp/auth.py b/splunk_connect_for_snmp/snmp/auth.py index b6974cc5f..f9da9892d 100644 --- a/splunk_connect_for_snmp/snmp/auth.py +++ b/splunk_connect_for_snmp/snmp/auth.py @@ -69,7 +69,7 @@ def get_security_engine_id(logger, ir: InventoryRecord, snmpEngine: SnmpEngine): # Send probe SNMP request with invalid credentials authData = UsmUserData("non-existing-user") - errorIndication, errorStatus, errorIndex, varBinds = next( + errorIndication, _, _, _ = next( getCmd( snmpEngine, authData, @@ -80,17 +80,19 @@ def get_security_engine_id(logger, ir: InventoryRecord, snmpEngine: SnmpEngine): ) # See if our SNMP engine received REPORT PDU containing securityEngineId - securityEngineId = fetch_security_engine_id(observerContext, errorIndication) - logger.debug(f"securityEngineId={securityEngineId}") + securityEngineId = fetch_security_engine_id( + observerContext, errorIndication, ir.address + ) + logger.debug(f"securityEngineId={securityEngineId} for device {ir.address}") return securityEngineId -def fetch_security_engine_id(observer_context, errorIndication): +def fetch_security_engine_id(observer_context, errorIndication, ipaddress): if "securityEngineId" in observer_context: return observer_context["securityEngineId"] else: raise SnmpActionError( - f"Can't discover peer EngineID, errorIndication: {errorIndication}" + f"Can't discover peer EngineID for device {ipaddress}, errorIndication: {errorIndication}" ) diff --git a/splunk_connect_for_snmp/snmp/manager.py b/splunk_connect_for_snmp/snmp/manager.py index 5167f2d3f..4ceb05cac 100644 --- a/splunk_connect_for_snmp/snmp/manager.py +++ b/splunk_connect_for_snmp/snmp/manager.py @@ -298,6 +298,7 @@ def do_work( ): 
retry = False address = transform_address_to_key(ir.address, ir.port) + logger.info(f"Preparing task for {ir.address}") if time.time() - self.last_modified > PROFILES_RELOAD_DELAY or walk: self.profiles = self.profiles_manager.return_collection() @@ -341,7 +342,7 @@ def do_work( ir.address, walk, ): - tmp_retry, tmp_mibs, _ = self.process_snmp_data( + _, tmp_mibs, _ = self.process_snmp_data( varBindTable, metrics, address, bulk_mapping ) if tmp_mibs: @@ -443,10 +444,10 @@ def process_snmp_data(self, varBindTable, metrics, target, mapping={}): for varBind in varBindTable: mib, metric, index = varBind[0].getMibSymbol() - id = varBind[0].prettyPrint() + varBind_id = varBind[0].prettyPrint() oid = str(varBind[0].getOid()) - if isMIBResolved(id): + if isMIBResolved(varBind_id): group_key = get_group_key(mib, oid, index) if group_key not in metrics: indexes = extract_indexes(index) @@ -471,7 +472,7 @@ def process_snmp_data(self, varBindTable, metrics, target, mapping={}): profile = None if mapping: profile = mapping.get( - id.replace('"', ""), + varBind_id.replace('"', ""), mapping.get(f"{mib}::{metric}", mapping.get(mib)), ) # when varbind name differs from mib-family, @@ -510,7 +511,7 @@ def process_snmp_data(self, varBindTable, metrics, target, mapping={}): f"Exception processing data from {target} {varBind}" ) else: - found, mib = self.is_mib_known(id, oid, target) + found, mib = self.is_mib_known(varBind_id, oid, target) if mib and mib not in remotemibs: remotemibs.append(mib) if found: diff --git a/splunk_connect_for_snmp/snmp/tasks.py b/splunk_connect_for_snmp/snmp/tasks.py index c8d4c3463..83898218c 100644 --- a/splunk_connect_for_snmp/snmp/tasks.py +++ b/splunk_connect_for_snmp/snmp/tasks.py @@ -47,7 +47,7 @@ WALK_RETRY_MAX_INTERVAL = int(os.getenv("WALK_RETRY_MAX_INTERVAL", "180")) WALK_MAX_RETRIES = int(os.getenv("WALK_MAX_RETRIES", "5")) SPLUNK_SOURCETYPE_TRAPS = os.getenv("SPLUNK_SOURCETYPE_TRAPS", "sc4snmp:traps") -OID_VALIDATOR = 
re.compile(r"^([0-2])((\.0)|(\.[1-9][0-9]*))*$") +OID_VALIDATOR = re.compile(r"^([0-2])((\.0)|(\.[1-9]\d*))*$") RESOLVE_TRAP_ADDRESS = os.getenv("RESOLVE_TRAP_ADDRESS", "false") MAX_DNS_CACHE_SIZE_TRAPS = int(os.getenv("MAX_DNS_CACHE_SIZE_TRAPS", "100")) TTL_DNS_CACHE_TRAPS = int(os.getenv("TTL_DNS_CACHE_TRAPS", "1800")) diff --git a/splunk_connect_for_snmp/snmp/varbinds_resolver.py b/splunk_connect_for_snmp/snmp/varbinds_resolver.py index fb87c9be5..7f58a7269 100644 --- a/splunk_connect_for_snmp/snmp/varbinds_resolver.py +++ b/splunk_connect_for_snmp/snmp/varbinds_resolver.py @@ -45,19 +45,18 @@ def insert_varbind(self, varbind): """ mapping_key = varbind.mapping_key() if mapping_key in self.map: - print(f"Element {mapping_key} already in the varbind container") + logger.debug(f"Element {mapping_key} already in the varbind container") + return + if len(varbind.list) > 1 and varbind.list[0] in self.map: + logger.debug( + f"Element {mapping_key} not added as {varbind.list[0]} is already in the varbind container" + ) return - if len(varbind.list) > 1: - if varbind.list[0] in self.map: - print( - f"Element {mapping_key} not added as {varbind.list[0]} is already in the varbind container" - ) - return if len(varbind.list) > 2: varbind_tmp = Varbind(varbind.list[:2]) mapping_key_for_two = varbind_tmp.mapping_key() if mapping_key_for_two in self.map: - print( + logger.debug( f"Element {mapping_key} not added as {mapping_key_for_two} is already in the varbind container" ) return diff --git a/splunk_connect_for_snmp/splunk/tasks.py b/splunk_connect_for_snmp/splunk/tasks.py index d38ab5239..c9b9398c3 100644 --- a/splunk_connect_for_snmp/splunk/tasks.py +++ b/splunk_connect_for_snmp/splunk/tasks.py @@ -150,7 +150,6 @@ def do_send(data, destination_url, self): # 200 is good if response.status_code in (200, 202): logger.debug(f"Response code is {response.status_code} {response.text}") - pass # These errors can't be retried elif response.status_code in (403, 401, 400): 
logger.error(f"Response code is {response.status_code} {response.text}") diff --git a/splunk_connect_for_snmp/traps.py b/splunk_connect_for_snmp/traps.py index c93cfd5bc..f3cb03a83 100644 --- a/splunk_connect_for_snmp/traps.py +++ b/splunk_connect_for_snmp/traps.py @@ -33,13 +33,10 @@ from celery import Celery, chain from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider - -# from opentelemetry.sdk.trace.export import BatchSpanProcessor from pysnmp.carrier.asyncio.dgram import udp from pysnmp.entity import config, engine from pysnmp.entity.rfc3413 import ntfrcv -from splunk_connect_for_snmp import * from splunk_connect_for_snmp.snmp.const import AuthProtocolMap, PrivProtocolMap from splunk_connect_for_snmp.snmp.tasks import trap from splunk_connect_for_snmp.splunk.tasks import prepare, send @@ -69,9 +66,6 @@ # Callback function for receiving notifications # noinspection PyUnusedLocal def cbFun(snmpEngine, stateReference, contextEngineId, contextName, varBinds, cbCtx): - transportDomain, transportAddress = snmpEngine.msgAndPduDsp.getTransportInfo( - stateReference - ) logging.debug( 'Notification from ContextEngineId "%s", ContextName "%s"' % (contextEngineId.prettyPrint(), contextName.prettyPrint()) @@ -142,11 +136,10 @@ def main(): with open(CONFIG_PATH, encoding="utf-8") as file: config_base = yaml.safe_load(file) idx = 0 - if "communities" in config_base: - if "2c" in config_base["communities"]: - for community in config_base["communities"]["2c"]: - idx += 1 - config.addV1System(snmpEngine, idx, community) + if "communities" in config_base and "2c" in config_base["communities"]: + for community in config_base["communities"]["2c"]: + idx += 1 + config.addV1System(snmpEngine, idx, community) if "usernameSecrets" in config_base: for secret in config_base["usernameSecrets"]: diff --git a/test/common/test_inventory_processor.py b/test/common/test_inventory_processor.py index e4b337191..657a76c29 100644 --- 
a/test/common/test_inventory_processor.py +++ b/test/common/test_inventory_processor.py @@ -1,7 +1,11 @@ +import logging import os from unittest import TestCase, mock from unittest.mock import Mock, mock_open, patch +import pytest +from _pytest.logging import caplog + from splunk_connect_for_snmp.common.inventory_processor import ( InventoryProcessor, InventoryRecordManager, @@ -18,7 +22,8 @@ 0.0.0.0,,2c,public,,,1805,solo_profile1,False,False 0.0.0.0,1161,2c,public,,,1805,solo_profile2,False,False""" - +# TODO: write new test for walkProfile and full walk with flag +@pytest.mark.usefixtures("caplog") class TestInventoryProcessor(TestCase): profiles = { "test5": {"frequency": 6, "varBinds": [["IP-MIB"]]}, @@ -210,7 +215,7 @@ def test_get_group_hosts(self): "delete": "", }, ] - inventory_processor = InventoryProcessor(group_manager, Mock(), Mock()) + inventory_processor = InventoryProcessor(group_manager, Mock()) group_manager.return_element.return_value = [ { "group1": [ @@ -224,9 +229,13 @@ def test_get_group_hosts(self): inventory_processor.inventory_records, group_object_returned ) - def test_get_group_hosts_hostname(self): + # @mock.patch( + # "splunk_connect_for_snmp.common.inventory_processor.ENABLE_FULL_WALK", + # True, + # ) + @mock.patch("splunk_connect_for_snmp.common.inventory_processor.logger") + def test_get_group_hosts_hostname(self, logger): group_manager = Mock() - logger = Mock() group_object = { "address": "ec2-54-91-99-115.compute-1.amazonaws.com", "port": "", @@ -239,7 +248,7 @@ def test_get_group_hosts_hostname(self): "SmartProfiles": "f", "delete": "", } - inventory_processor = InventoryProcessor(group_manager, logger, Mock()) + inventory_processor = InventoryProcessor(group_manager, Mock()) group_manager.return_element.return_value = [] inventory_processor.get_group_hosts( group_object, "ec2-54-91-99-115.compute-1.amazonaws.com" @@ -250,10 +259,14 @@ def test_get_group_hosts_hostname(self): self.assertEqual(inventory_processor.single_hosts, 
[group_object]) self.assertEqual(inventory_processor.inventory_records, []) - def test_process_line_comment(self): - logger = Mock() + # @mock.patch( + # "splunk_connect_for_snmp.common.inventory_processor.ENABLE_FULL_WALK", + # True, + # ) + @mock.patch("splunk_connect_for_snmp.common.inventory_processor.logger") + def test_process_line_comment(self, logger): source_record = {"address": "#54.234.85.76"} - inventory_processor = InventoryProcessor(Mock(), logger, Mock()) + inventory_processor = InventoryProcessor(Mock(), Mock()) inventory_processor.process_line(source_record) logger.warning.assert_called_with( "Record: #54.234.85.76 is commented out. Skipping..." @@ -272,13 +285,13 @@ def test_process_line_comment(self): ) def test_process_line_host(self, m_inventory): source_record = {"address": "54.234.85.76"} - inventory_processor = InventoryProcessor(Mock(), Mock(), Mock()) + inventory_processor = InventoryProcessor(Mock(), Mock()) inventory_processor.get_all_hosts() self.assertEqual(inventory_processor.inventory_records, [source_record]) def test_process_line_group(self): source_record = {"address": "group1"} - inventory_processor = InventoryProcessor(Mock(), Mock(), Mock()) + inventory_processor = InventoryProcessor(Mock(), Mock()) inventory_processor.get_group_hosts = Mock() inventory_processor.process_line(source_record) inventory_processor.get_group_hosts.assert_called_with(source_record, "group1") @@ -307,7 +320,7 @@ def test_ignore_line_host_configured_in_group(self, m_load_element): ] group_manager = Mock() group_manager.return_element.return_value = returned_group - inventory_processor = InventoryProcessor(group_manager, Mock(), Mock()) + inventory_processor = InventoryProcessor(group_manager, Mock()) expected = [ { "address": "0.0.0.0", @@ -351,19 +364,23 @@ def test_ignore_line_host_configured_in_group(self, m_load_element): inventory_processor.get_all_hosts() self.assertEqual(expected, inventory_processor.inventory_records) + @mock.patch( + 
"splunk_connect_for_snmp.common.inventory_processor.ENABLE_FULL_WALK", + True, + ) def test_return_walk_profile(self): inventory_profiles = ["walk1", "generic_switch"] - inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock()) + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) self.assertEqual( inventory_record_manager.return_walk_profile( self.profiles, inventory_profiles ), - "walk1", + None, ) - def test_return_walk_profile_more_than_one(self): + def test_return_walk_profile_more_than_one_no_enable(self): inventory_profiles = ["walk1", "test_33", "generic_switch"] - inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock()) + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) self.assertEqual( inventory_record_manager.return_walk_profile( self.profiles, inventory_profiles @@ -371,9 +388,13 @@ def test_return_walk_profile_more_than_one(self): "test_33", ) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.ENABLE_FULL_WALK", + True, + ) def test_return_walk_profile_no_walk_in_inventory(self): inventory_profiles = ["generic_switch"] - inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock()) + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) self.assertEqual( inventory_record_manager.return_walk_profile( self.profiles, inventory_profiles @@ -381,9 +402,13 @@ def test_return_walk_profile_no_walk_in_inventory(self): None, ) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.ENABLE_FULL_WALK", + True, + ) def test_return_walk_profile_no_walk_in_config(self): inventory_profiles = ["generic_switch", "walk2"] - inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock()) + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) self.assertEqual( inventory_record_manager.return_walk_profile( self.profiles, inventory_profiles @@ -391,22 +416,77 @@ def test_return_walk_profile_no_walk_in_config(self): None, ) + 
@mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.ENABLE_FULL_WALK", + True, + ) def test_return_walk_profile_no_config(self): inventory_profiles = ["generic_switch", "walk2"] - inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock()) + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) self.assertEqual( inventory_record_manager.return_walk_profile({}, inventory_profiles), None ) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.ENABLE_FULL_WALK", + True, + ) def test_return_walk_profile_no_config_no_inventory(self): inventory_profiles = [] - inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock()) + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) self.assertEqual( inventory_record_manager.return_walk_profile({}, inventory_profiles), None ) + @mock.patch( + "splunk_connect_for_snmp.common.inventory_processor.ENABLE_FULL_WALK", + True, + ) def test_return_walk_profile_no_inventory(self): - inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock()) + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) self.assertEqual( inventory_record_manager.return_walk_profile(self.profiles, []), None ) + + def test_return_walk_profile_no_enable(self): + inventory_profiles = ["walk1", "generic_switch"] + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) + self.assertEqual( + inventory_record_manager.return_walk_profile( + self.profiles, inventory_profiles + ), + "walk1", + ) + + def test_return_walk_profile_no_walk_in_inventory_no_enable(self): + inventory_profiles = ["generic_switch"] + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) + self.assertEqual( + inventory_record_manager.return_walk_profile( + self.profiles, inventory_profiles + ), + "WalkProfile", + ) + + def test_return_walk_profile_no_config_no_enable(self): + inventory_profiles = ["generic_switch", "walk2"] + inventory_record_manager = 
InventoryRecordManager(Mock(), Mock()) + self.assertEqual( + inventory_record_manager.return_walk_profile({}, inventory_profiles), + "WalkProfile", + ) + + def test_return_walk_profile_no_config_no_inventory_no_enable(self): + inventory_profiles = [] + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) + self.assertEqual( + inventory_record_manager.return_walk_profile({}, inventory_profiles), + "WalkProfile", + ) + + def test_return_walk_profile_no_inventory_no_enable(self): + inventory_record_manager = InventoryRecordManager(Mock(), Mock()) + self.assertEqual( + inventory_record_manager.return_walk_profile(self.profiles, []), + "WalkProfile", + ) diff --git a/test/snmp/test_auth.py b/test/snmp/test_auth.py index 58400943b..d7ab04b88 100644 --- a/test/snmp/test_auth.py +++ b/test/snmp/test_auth.py @@ -135,14 +135,16 @@ def test_get_security_engine_id(self, m_fetch, m_get_cmd): self.assertEqual(result, "My test value") def test_fetch_security_engine_id(self): - result = fetch_security_engine_id({"securityEngineId": "some_value"}, None) + result = fetch_security_engine_id( + {"securityEngineId": "some_value"}, None, "127.0.0.1" + ) self.assertEqual(result, "some_value") def test_fetch_security_engine_id_missing(self): with self.assertRaises(SnmpActionError) as e: - fetch_security_engine_id({}, "Some error") + fetch_security_engine_id({}, "Some error", "127.0.0.1") self.assertEqual( - "Can't discover peer EngineID, errorIndication: Some error", + "Can't discover peer EngineID for device 127.0.0.1, errorIndication: Some error", e.exception.args[0], ) diff --git a/ui_tests/logger/logger.py b/ui_tests/logger/logger.py index ba6e261c5..86e762ebe 100644 --- a/ui_tests/logger/logger.py +++ b/ui_tests/logger/logger.py @@ -15,7 +15,6 @@ def initialize_logger(cls): handler = logging.StreamHandler(sys.stdout) handler.setFormatter(formatter) logger.addHandler(handler) - # return logger cls.logger = logger @classmethod diff --git a/ui_tests/pages/helper.py 
b/ui_tests/pages/helper.py index 779fab440..f5b578f91 100644 --- a/ui_tests/pages/helper.py +++ b/ui_tests/pages/helper.py @@ -7,5 +7,5 @@ def clear_input(input_element): logger.info("Clearing input") text = input_element.get_attribute("value") - for num in range(len(text)): + for _ in range(len(text)): input_element.send_keys(Keys.BACKSPACE) diff --git a/ui_tests/pages/profiles_page.py b/ui_tests/pages/profiles_page.py index 8c29eca0c..c0b43d409 100644 --- a/ui_tests/pages/profiles_page.py +++ b/ui_tests/pages/profiles_page.py @@ -191,7 +191,7 @@ def check_if_frequency_setting_field_is_visible(self): try: freq_field = driver.find_element(By.XPATH, xpath) return freq_field.is_displayed() - except Exception as e: + except Exception: return False def add_condition(self, field_value, operation, value): diff --git a/ui_tests/requirements.txt b/ui_tests/requirements.txt index d23ff5c49..ca102b7a4 100644 --- a/ui_tests/requirements.txt +++ b/ui_tests/requirements.txt @@ -1,4 +1,5 @@ pytest-splunk-addon selenium webdriver_manager -pyyaml \ No newline at end of file +pyyaml +deprecation \ No newline at end of file diff --git a/ui_tests/tests/test_splunk_integration.py b/ui_tests/tests/test_splunk_integration.py index 2738ce1d7..c8a28da12 100644 --- a/ui_tests/tests/test_splunk_integration.py +++ b/ui_tests/tests/test_splunk_integration.py @@ -288,7 +288,6 @@ def test_setting_host_in_inventory(setup): host = setup["device_simulator"] community = "public" - new_community = "test1234" profile_1_name = "standard_profile_10s" profile_1_freq = 10 profile_2_name = "standard_profile_7s" diff --git a/ui_tests/webdriver/webriver_factory.py b/ui_tests/webdriver/webriver_factory.py index 638c7be69..f1ee13749 100644 --- a/ui_tests/webdriver/webriver_factory.py +++ b/ui_tests/webdriver/webriver_factory.py @@ -25,7 +25,6 @@ def get_driver(cls): chrome_options.add_argument("--headless") chrome_options.add_argument("--disable-gpu") chrome_options.add_argument("--window-size=1920x1080") - # 
web_driver = webdriver.Chrome(options=chrome_options) cls._driver = webdriver.Chrome( service=ChromeService(ChromeDriverManager().install()),