From caa0d27329ad29949d2002d6d34502b07fd8de76 Mon Sep 17 00:00:00 2001
From: Chuck Daniels
Date: Thu, 31 Oct 2024 16:34:52 -0400
Subject: [PATCH] CDK v1 to v2 (#19)

Configure GH actions for testing/deployment (#4)

Updated GitHub Actions to trigger testing and deployment for the appropriate
events:

- Run tests on code in the "head" branch for all PRs to `develop` or `main`
  (on open, reopen, edit, or synchronize of a PR)
- Run tests on code in the "target" branch for all PRs closed/merged to
  `develop` or `main`
- Deploy to `dev` env on PR close/merge to `develop`
- Deploy to `prod` env on tag/release on `main`

Use GitHub OIDC for AWS credentials (#5)

- Allow manual workflow trigger in GitHub
- Add `TOX_OPTS` env var to `Makefile` to allow overriding `tox` options from
  the command line when running `make`
- Restrict integration tests to the `develop` and `main` branches, because the
  OIDC trust policy is restricted to those branches
- Add permissions boundary to integration test resources stack (#6)

Run integration tests on PR (#7)

- To avoid having to wait for PR approval and merging to `develop` or `main`,
  run integration tests when a PR is opened or updated.
- Ensure integration test resources are destroyed even when the stack under
  test fails to deploy.
- Simplify managed policy construction
- Export secrets as env vars for int tests
- Rename variable named the same as a builtin
- Remove redundant removal policy
- Add forward notification stack (#13)
- Update versions in pre-commit config: the previous version of the isort
  pre-commit hook was causing pre-commit to exit with a `CalledProcessError`.
  Running `pre-commit autoupdate` to update the hook versions fixed the
  problem, allowing pre-commit to run successfully again.
- Fix code coverage "no data collected" warning
- Upgrade GitHub actions; cache Python deps
- Fix broken historical test
- Refactor "historical" files to avoid conflicts with "forward" files
- Pin major versions of dependencies: the `moto` library recently released
  version 5.x, containing breaking changes, which caused unit tests to fail
  due to import errors. Given the need to pin `moto` to 4.x, I also pinned all
  other dependencies to avoid other such problems.
- Make tiler queue optional (#14): if no value is supplied for the env var
  HLS_LPDAAC_TILER_QUEUE_ARN, a dummy queue is created and used. This is
  necessary because such a queue exists only in production, so other envs need
  a dummy queue. (A sketch of this pattern follows below.)
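To illustrate the optional tiler queue described in the last bullet above, here is a minimal stand-alone sketch of the pattern that lands in `cdk/stacks/forward_notification.py` later in this patch; the class name and docstring are illustrative, not part of the patch:

```python
import os
from typing import Optional, Union

from aws_cdk import Duration, Stack
from aws_cdk import aws_sqs as sqs
from constructs import Construct


class TilerQueueSketch(Stack):
    """Illustration only; the real logic lives in ForwardNotificationStack."""

    def __init__(self, scope: Construct, stack_id: str) -> None:
        super().__init__(scope, stack_id)

        # Set only in production, where the single real tiler queue exists.
        tiler_queue_arn: Optional[str] = os.getenv("HLS_LPDAAC_TILER_QUEUE_ARN")

        # Fall back to a short-lived dummy queue when no ARN is supplied.
        self.tiler_queue: Union[sqs.Queue, sqs.IQueue] = (
            sqs.Queue(self, "tiler", retention_period=Duration.minutes(5))
            if tiler_queue_arn is None
            else sqs.Queue.from_queue_arn(self, "tiler", queue_arn=tiler_queue_arn)
        )
```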
- Remove instructions on manual deployment from EC2 instance

Migrate CDK v1 to v2 (#17)

- Add CDK v2 bootstrap template (with MCP-compatible adjustments from the
  default template)
- Add CDK bootstrap command to Makefile
- Migrate from aws-cdk v1 to v2 packages

Bump CDK and Node versions (#20)
---
.github/workflows/deploy.yml | 68 ++ .github/workflows/main.yml | 173 ++++- .gitignore | 2 +- .pre-commit-config.yaml | 8 +- .vscode/settings.json | 14 +- Makefile | 93 +-- README.md | 52 +- cdk.json | 11 +- cdk/app_ci.py | 36 - cdk/app_forward.py | 37 ++ cdk/app_forward_it.py | 44 ++ cdk/{app.py => app_historical.py} | 11 +- cdk/app_historical_it.py | 41 ++ cdk/bootstrap-template.yaml | 629 ++++++++++++++++++ cdk/bootstrap.py | 2 + cdk/stacks/__init__.py | 14 +- cdk/stacks/forward_notification.py | 67 ++ cdk/stacks/forward_notification_it.py | 60 ++ ...ac_stack.py => historical_notification.py} | 21 +- cdk/stacks/historical_notification_it.py | 52 ++ cdk/stacks/hls_lpdaac_stack_ci.py | 28 - setup.py | 49 +- src/hls_lpdaac/forward/__init__.py | 3 + src/hls_lpdaac/forward/index.py | 45 ++ src/hls_lpdaac/historical/index.py | 8 +- tests/integration/test_forward_lambda.py | 73 ++ tests/integration/test_historical_lambda.py | 32 +- tests/unit/conftest.py | 4 +- tests/unit/test_forward_handler.py | 32 + tests/unit/test_forward_stack.py | 48 ++ tests/unit/test_historical_handler.py | 4 +- ...daac_stack.py => test_historical_stack.py} | 8 +- tox.ini | 24 +- 33 files changed, 1548 insertions(+), 245 deletions(-) create mode 100644 .github/workflows/deploy.yml delete mode 100755 cdk/app_ci.py create mode 100755 cdk/app_forward.py create mode 100755 cdk/app_forward_it.py rename cdk/{app.py => app_historical.py} (67%) create mode 100755 cdk/app_historical_it.py create mode 100644 cdk/bootstrap-template.yaml create mode 100644 cdk/bootstrap.py create mode 100644 cdk/stacks/forward_notification.py create mode 100644 cdk/stacks/forward_notification_it.py rename cdk/stacks/{hls_lpdaac_stack.py => historical_notification.py} (78%) create mode 100644 cdk/stacks/historical_notification_it.py delete mode 100644 cdk/stacks/hls_lpdaac_stack_ci.py create mode 100644 src/hls_lpdaac/forward/__init__.py create mode 100644 src/hls_lpdaac/forward/index.py create mode 100644 tests/integration/test_forward_lambda.py create mode 100644 tests/unit/test_forward_handler.py create mode 100644 tests/unit/test_forward_stack.py rename tests/unit/{test_hls_lpdaac_stack.py => test_historical_stack.py} (90%) diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000..e887408 --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,68 @@ +name: Deploy + +on: + workflow_dispatch: + workflow_call: + inputs: + environment: + required: true + type: string + PYTHON_VERSION: + required: true + type: string + TOX_MIN_VERSION: + required: true + type: string + +defaults: + run: + shell: bash + +# See https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services#updating-your-github-actions-workflow +permissions: + id-token: write # required for requesting the JWT + contents: read # required for actions/checkout + +jobs: + deploy: + runs-on: ubuntu-22.04 + environment: ${{ inputs.environment }} + steps: + - name: Checkout source + uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "${{ inputs.PYTHON_VERSION }}" + cache: 'pip' + cache-dependency-path: setup.py + - name: 
Install dependencies + run: | + pip install "tox>=${{ inputs.TOX_MIN_VERSION }}" + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-region: ${{ vars.AWS_DEFAULT_REGION }} + role-to-assume: ${{ vars.AWS_ROLE_TO_ASSUME_ARN }} + role-session-name: ${{ github.actor }} + - name: Convert secrets to environment variables + env: + SECRETS_JSON: ${{ toJson(secrets) }} + run: | + while read -rd $'' line; do + echo "$line" >> $GITHUB_ENV + done < <( + jq -r <<<"$SECRETS_JSON" 'to_entries|map("\(.key)=\(.value)\u0000")[]' + ) + - name: Convert vars to environment variables + env: + VARS_JSON: ${{ toJson(vars) }} + run: | + while read -rd $'' line; do + echo "$line" >> $GITHUB_ENV + done < <( + jq -r <<<"$VARS_JSON" 'to_entries|map("\(.key)=\(.value)\u0000")[]' + ) + - name: Deploy forward notification to ${{ inputs.environment }} + run: | + make deploy-forward diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index fb1c550..a335621 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,26 +1,167 @@ # Install Python dependencies, run tests, and lint with a single version of Python. # See https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions -name: HLS LPDAAC Historical +name: HLS LPDAAC Notifications on: + workflow_dispatch: + release: + types: + - published push: - branches: [ main ] + branches: + - main + - develop + tags-ignore: + - '*' + paths: + - '.github/workflows/*' + - 'cdk/**' + - 'src/**' + - 'cdk.json' + - 'Makefile' + - 'setup.py' + - 'tox.ini' pull_request: - branches: [ main ] + types: + - edited + - opened + - reopened + - synchronize + branches: + - main + - develop + paths: + - '.github/workflows/*' + - 'cdk/**' + - 'src/**' + - 'cdk.json' + - 'Makefile' + - 'setup.py' + - 'tox.ini' + +# See https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services#updating-your-github-actions-workflow +permissions: + id-token: write # required for requesting the JWT + contents: read # required for actions/checkout + +defaults: + run: + shell: bash jobs: - build: - runs-on: ubuntu-latest + config: + # This is a hack to work around the lack of support for two other possibilities for + # avoiding duplication of configuration values: + # + # (1) YAML anchors (https://yaml.org/spec/1.1/current.html#id899912) and aliases + # (https://yaml.org/spec/1.1/current.html#id902561) + # (2) Availability of `env` context within `jobs.<job_id>.with.<with_id>` (see + # https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability) + # + # Alternative hack: https://github.com/actions/runner/issues/1182#issuecomment-1262870831 + runs-on: ubuntu-22.04 + outputs: + PYTHON_VERSION: "3.9" + TOX_MIN_VERSION: "3.18.0" # `allowlist_externals` replaces `whitelist_externals` + steps: + - name: Configure shared values + run: "" # Nothing to do, but at least one step is required + + unit-tests: + runs-on: ubuntu-22.04 + needs: config steps: - - uses: actions/checkout@v2 - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: "3.9" - - name: Install dependencies - run: | - pip install tox - - name: Run tests - run: | - tox -e py + - name: Checkout source + uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "${{ needs.config.outputs.PYTHON_VERSION }}" + cache: 'pip' + cache-dependency-path: setup.py + - name: Install dependencies + run: | + 
pip install "tox>=${{ needs.config.outputs.TOX_MIN_VERSION }}" + - name: Run unit tests + run: | + make unit-tests + + integration-tests: + runs-on: ubuntu-22.04 + environment: dev-forward + needs: config + steps: + - name: Checkout source + uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "${{ needs.config.outputs.PYTHON_VERSION }}" + cache: 'pip' + cache-dependency-path: setup.py + - name: Install dependencies + run: | + pip install "tox>=${{ needs.config.outputs.TOX_MIN_VERSION }}" + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-region: ${{ vars.AWS_DEFAULT_REGION }} + role-to-assume: ${{ vars.AWS_ROLE_TO_ASSUME_ARN }} + role-session-name: ${{ github.actor }} + - name: Convert secrets to environment variables + env: + SECRETS_JSON: ${{ toJson(secrets) }} + run: | + while read -rd $'' line; do + echo "$line" >> $GITHUB_ENV + done < <( + jq -r <<<"$SECRETS_JSON" 'to_entries|map("\(.key)=\(.value)\u0000")[]' + ) + - name: Convert vars to environment variables + env: + VARS_JSON: ${{ toJson(vars) }} + run: | + while read -rd $'' line; do + echo "$line" >> $GITHUB_ENV + done < <( + jq -r <<<"$VARS_JSON" 'to_entries|map("\(.key)=\(.value)\u0000")[]' + ) + - name: Deploy forward notification integration test stack + run: | + make deploy-forward-it + - name: Run forward notification integration tests + run: | + make forward-integration-tests + - name: Destroy forward notification integration test stack + if: '!cancelled()' + run: | + make destroy-forward-it + + deploy-dev: + # Deploy to Dev only on push (including merged PR) to `develop` branch + if: github.event_name == 'push' && github.event.ref == 'refs/heads/develop' + needs: + - config + - unit-tests + - integration-tests + uses: ./.github/workflows/deploy.yml + with: + environment: dev-forward + PYTHON_VERSION: "${{ needs.config.outputs.PYTHON_VERSION }}" + TOX_MIN_VERSION: "${{ needs.config.outputs.TOX_MIN_VERSION }}" + secrets: inherit + + deploy-prod: + # Deploy to Prod only on publishing a release (tag) on `main` branch + if: github.event_name == 'release' && github.event.action == 'published' + needs: + - config + - unit-tests + - integration-tests + uses: ./.github/workflows/deploy.yml + with: + environment: prod-forward + PYTHON_VERSION: "${{ needs.config.outputs.PYTHON_VERSION }}" + TOX_MIN_VERSION: "${{ needs.config.outputs.TOX_MIN_VERSION }}" + secrets: inherit diff --git a/.gitignore b/.gitignore index 456dd36..0fa6c3a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,9 @@ .envrc +.python-version ### CDK ### *.swp package-lock.json -cdk.context.json # CDK asset staging directory .cdk.staging diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ccb2269..bf735ee 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ default_stages: [commit] repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 + rev: v4.5.0 hooks: - id: check-executables-have-shebangs - id: check-merge-conflict @@ -12,14 +12,14 @@ repos: - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/psf/black - rev: 22.3.0 + rev: 24.1.0 hooks: - id: black - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: 5.13.0 hooks: - id: isort - repo: https://github.com/PyCQA/flake8 - rev: 4.0.1 + rev: 7.0.0 hooks: - id: flake8 diff --git a/.vscode/settings.json b/.vscode/settings.json index 4d59e85..5b5290b 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,16 
+1,10 @@ { - "deepscan.enable": true, - "python.formatting.provider": "black", - "python.linting.mypyEnabled": true, - "python.linting.enabled": true, - "python.linting.mypyArgs": [ - "--show-column-numbers", - "src", - "tests" - ], "python.analysis.typeCheckingMode": "basic", "python.analysis.extraPaths": [ "./src", "./tests" - ] + ], + "yaml.schemas": { + "https://json.schemastore.org/github-workflow.json": "file://./.github/workflows/deploy.yml" + } } diff --git a/Makefile b/Makefile index 4be4f2c..6881611 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,12 @@ +APPS=$(subst _,-,$(patsubst cdk/app_%.py,%,$(wildcard cdk/app_*.py))) +IT_APPS=$(subst _,-,$(patsubst cdk/app_%.py,%,$(wildcard cdk/app_*_it.py))) +RECREATE= SHELL=/usr/bin/env bash +TOX=tox $(TOX_OPTS) +TOX_OPTS?= +VENV_TOX_LOG_LOCK=.venv/.tox-info.json -.PHONY: help tox +.PHONY: help bootstrap install-cdk install-node tox unit-tests venv .DEFAULT_GOAL := help help: Makefile @@ -13,58 +19,65 @@ help: Makefile @echo "Targets:" @sed -n 's/^##//p' $< | column -t -s ':' | sed -e 's/^/ /' @echo + @printf " where APP is one of the following:\n$(patsubst %,\n - %,$(APPS))\n" + @echo + +# Set the tox --recreate option when setup.py is newer than the tox log lock +# file in the virtualenv, as that indicates it was updated since the last time +# tox was run. This allows us to develop more quickly by avoiding unnecessary +# environment recreation, while ensuring that the environment is recreated when +# necessary (i.e., when dependencies change). +$(VENV_TOX_LOG_LOCK): setup.py + $(eval RECREATE := --recreate) + +# Rules that run a tox command should depend on this to make sure the virtualenv +# is updated when necessary, without unnecessarily specifying the tox --recreate +# option explicitly. +venv: $(VENV_TOX_LOG_LOCK) tox: - if [ -z $${TOX_ENV_DIR+x} ]; then echo "ERROR: For tox.ini use only" >&2; exit 1; fi + @if [[ -z $${TOX_ENV_DIR} ]]; then \ + echo "ERROR: For tox.ini use only" >&2; \ + exit 1; \ + fi # NOTE: Intended only for use from tox.ini. # Install Node.js within the tox virtualenv. install-node: tox - nodeenv --node 16.17.0 --python-virtualenv + nodeenv --node lts --python-virtualenv # NOTE: Intended only for use from tox.ini # Install the CDK CLI within the tox virtualenv. -install-cdk: tox install-node - npm install --location global "aws-cdk@1.x" - # Acknowledge CDK notice regarding CDK v1 being in maintenance mode. 
- grep -q 19836 cdk.context.json 2>/dev/null || cdk acknowledge 19836 - -## unit-tests: Run unit tests -unit-tests: - tox -v -r - -## integration-tests: Run integration tests (requires ci-deploy) -integration-tests: - tox -v -e integration -r +install-cdk: install-node + npm install --location global "aws-cdk@latest" -## synth: Run CDK synth -synth: - tox -v -e dev -r -- synth '*' --app cdk/app.py +## bootstrap: Bootstrap the CDK toolkit +bootstrap: + $(TOX) $(RECREATE) -e dev -- bootstrap \ + --toolkit-stack-name CDKToolkitV2 \ + --custom-permissions-boundary mcp-tenantOperator \ + --template cdk/bootstrap-template.yaml -## deploy: Run CDK deploy -deploy: - tox -v -e dev -r -- deploy '*' --app cdk/app.py --progress events --require-approval never - -## diff: Run CDK diff -diff: - tox -v -e dev -r -- diff '*' --app cdk/app.py +## unit-tests: Run unit tests +unit-tests: venv + $(TOX) $(RECREATE) -## destroy: Run CDK destroy -destroy: - tox -v -e dev -r -- destroy --force '*' --app cdk/app.py --progress events +## APP-integration-tests: Run integration tests for a CDK app (depends on deploy-APP-it) +$(patsubst %-it,%-integration-tests,$(IT_APPS)): venv + $(TOX) $(RECREATE) -e integration -- $(wildcard tests/integration/test_$(subst -integration-tests,,$@)*.py) -## ci-synth: Run CDK synth for integration stack -ci-synth: - tox -v -e dev -r -- deploy '*' --app cdk/app_ci.py +## synth-APP: Synthesize a CDK app +$(patsubst %,synth-%,$(APPS)): venv + $(TOX) $(RECREATE) -e dev -- synth --all --app $(subst -,_,$(patsubst synth-%,cdk/app_%.py,$@)) -## ci-deploy: Run CDK deploy for integration stack -ci-deploy: - tox -v -e dev -r -- deploy '*' --app cdk/app_ci.py --progress events --require-approval never +## diff-APP: Diff a CDK app +$(patsubst %,diff-%,$(APPS)): venv + $(TOX) $(RECREATE) -e dev -- diff --all --app $(subst -,_,$(patsubst diff-%,cdk/app_%.py,$@)) -## ci-diff: Run CDK diff for integration stack -ci-diff: - tox -v -e dev -r -- diff '*' --app cdk/app_ci.py +## deploy-APP: Deploy a CDK app +$(patsubst %,deploy-%,$(APPS)): venv + $(TOX) $(RECREATE) -e dev -- deploy --all --app $(subst -,_,$(patsubst deploy-%,cdk/app_%.py,$@)) --progress events --require-approval never -## ci-destroy: Run CDK destroy for integration stack -ci-destroy: - tox -v -e dev -r -- destroy --force '*' --app cdk/app_ci.py --progress events +## destroy-APP: Destroy a CDK app +$(patsubst %,destroy-%,$(APPS)): venv + $(TOX) $(RECREATE) -e dev -- destroy --all --app $(subst -,_,$(patsubst destroy-%,cdk/app_%.py,$@)) --progress events --force diff --git a/README.md b/README.md index 8cab85c..242423c 100644 --- a/README.md +++ b/README.md @@ -12,24 +12,38 @@ ## Environment Settings ```plain -export AWS_PROFILE= -export AWS_REGION= +# AWS Short-term Access Key + +export AWS_DEFAULT_REGION=us-west-2 +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +export AWS_SESSION_TOKEN= + +# Stack variables + export HLS_LPDAAC_STACK= export HLS_LPDAAC_BUCKET_NAME= export HLS_LPDAAC_QUEUE_ARN= - -# Optional -export HLS_LPDAAC_MANAGED_POLICY_NAME=<(e.g., mcp-tenantOperator)> +# Required ONLY in PROD for FORWARD processing (otherwise, a dummy queue is created) +export HLS_LPDAAC_TILER_QUEUE_ARN= +export HLS_LPDAAC_MANAGED_POLICY_NAME=mcp-tenantOperator ``` ## CDK Commands +In the `make` commands shown below, `<app>` must be one of the following: + +- `forward` +- `forward-it` (integration test stack) +- `historical` +- `historical-it` (integration test stack) + ### Synth Display the generated CloudFormation template that will be used to deploy. ```plain -make synth +make synth-<app> ``` ### Diff @@ -37,7 +51,7 @@ make synth Display a diff of the current deployment and any changes created. ```plain -make diff +make diff-<app> ``` ### Deploy @@ -45,7 +59,7 @@ make diff Deploy current version of stack: ```plain -make deploy +make deploy-<app> ``` ### Destroy @@ -53,7 +67,7 @@ make deploy Destroy current version of stack: ```plain -make destroy +make destroy-<app> ``` ### Development @@ -64,11 +78,11 @@ For active stack development run tox -e dev -r -- version ``` -This creates a local virtualenv in the directory `.venv-dev`. +This creates a local virtualenv in the directory `.venv`. To use it for development: ```plain -source .venv-dev/bin/activate +source .venv/bin/activate ``` Install pre-commit hooks: @@ -94,10 +108,18 @@ To run unit tests: make unit-tests ``` -To run integration tests: +To run integration tests for forward processing: + +```plain +make deploy-forward-it +make forward-integration-tests +make destroy-forward-it +``` + +To run integration tests for historical processing: ```plain -make ci-deploy -make integration-tests -make ci-destroy +make deploy-historical-it +make historical-integration-tests +make destroy-historical-it ``` diff --git a/cdk.json b/cdk.json index 72d394f..f41715c 100644 --- a/cdk.json +++ b/cdk.json @@ -1,5 +1,5 @@ { - "app": "python3 cdk/app.py", + "app": "python cdk/bootstrap.py", "watch": { "include": [ "**" ] @@ -15,16 +15,9 @@ "context": { "@aws-cdk/aws-apigateway:usagePlanKeyOrderInsensitiveId": true, "@aws-cdk/aws-cloudfront:defaultSecurityPolicyTLSv1.2_2021": true, - "@aws-cdk/aws-ecr-assets:dockerIgnoreSupport": true, "@aws-cdk/aws-ecs-patterns:removeDefaultDesiredCount": true, - "@aws-cdk/aws-efs:defaultEncryptionAtRest": true, - "@aws-cdk/aws-kms:defaultKeyPolicies": true, "@aws-cdk/aws-lambda:recognizeVersionProps": true, "@aws-cdk/aws-rds:lowercaseDbIdentifier": true, - "@aws-cdk/aws-s3:grantWriteWithoutAcl": true, - "@aws-cdk/aws-secretsmanager:parseOwnedSecretName": true, - "@aws-cdk/core:enableStackNameDuplicates": true, - "@aws-cdk/core:stackRelativeExports": true, - "aws-cdk:enableDiffNoFail": true + "@aws-cdk/core:stackRelativeExports": true } } diff --git a/cdk/app_ci.py b/cdk/app_ci.py deleted file mode 100755 index 5b39b08..0000000 --- a/cdk/app_ci.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python3 - -from aws_cdk import aws_iam as iam -from aws_cdk import aws_ssm as ssm -from aws_cdk import core as cdk - -from stacks import HlsLpdaacIntegrationStack, HlsLpdaacStack - -ci_app = cdk.App() -account_id = iam.AccountRootPrincipal().account_id - -int_stack = HlsLpdaacIntegrationStack(ci_app, "integration-test-resources") -stack_under_test = HlsLpdaacStack( - ci_app, - "hls-lpdaac-under-test", - bucket_name=int_stack.bucket.bucket_name, - queue_arn=int_stack.queue.queue_arn, - managed_policy_name="mcp-tenantOperator", -) - -# Set SSM Parameter for use within integration tests - -ssm.StringParameter( - stack_under_test, - "function_name", - string_value=stack_under_test.lpdaac_historical_lambda.function_name, - parameter_name=("/tests/function_name"), -) - -for k, v in dict( - Project="hls", - Stack="lpdaac-integration", -).items(): - cdk.Tags.of(ci_app).add(k, v, apply_to_launched_instances=True) - -ci_app.synth() diff --git a/cdk/app_forward.py b/cdk/app_forward.py new file mode 100755 index 0000000..e1a3207 --- /dev/null +++ b/cdk/app_forward.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +import os + +from aws_cdk import App, Tags + +from stacks import ForwardNotificationStack + +# 
Required environment variables +stack_name = os.environ["HLS_LPDAAC_STACK"] +bucket_name = os.environ["HLS_LPDAAC_BUCKET_NAME"] +queue_arn = os.environ["HLS_LPDAAC_QUEUE_ARN"] + +# Optional environment variables +managed_policy_name = os.getenv("HLS_LPDAAC_MANAGED_POLICY_NAME") +# Optional. If it is not provided, a dummy tiler queue will be created, which +# is what we want in all environments except production, since the single real +# tiler queue exists only in production. +tiler_queue_arn = os.getenv("HLS_LPDAAC_TILER_QUEUE_ARN") + +app = App() + +ForwardNotificationStack( + app, + stack_name, + bucket_name=bucket_name, + lpdaac_queue_arn=queue_arn, + tiler_queue_arn=tiler_queue_arn, + managed_policy_name=managed_policy_name, +) + +for k, v in dict( + Project="hls", + Stack=stack_name, +).items(): + Tags.of(app).add(k, v, apply_to_launched_instances=True) + +app.synth() diff --git a/cdk/app_forward_it.py b/cdk/app_forward_it.py new file mode 100755 index 0000000..740fb38 --- /dev/null +++ b/cdk/app_forward_it.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 +import os + +from aws_cdk import App, Tags +from aws_cdk import aws_ssm as ssm + +from stacks import ForwardNotificationITStack, ForwardNotificationStack + +managed_policy_name = os.getenv("HLS_LPDAAC_MANAGED_POLICY_NAME") + +app = App() + +it_stack = ForwardNotificationITStack( + app, + "hls-forward-it-resources", + managed_policy_name=managed_policy_name, +) +stack_under_test = ForwardNotificationStack( + app, + "hls-forward-under-test", + bucket_name=it_stack.bucket.bucket_name, + lpdaac_queue_arn=it_stack.forward_queue.queue_arn, + tiler_queue_arn=it_stack.tiler_queue.queue_arn, + managed_policy_name=managed_policy_name, +) + +# Set SSM Parameter for use within integration tests. Others are set directly +# within the it_stack itself. This one is set on stack_under_test rather than +# it_stack so we don't have a cyclic dependency. 
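+# (Defining this parameter on it_stack instead would make it_stack depend on +# stack_under_test for the function name, while stack_under_test already +# depends on it_stack for its bucket name and queue ARNs, so each stack would +# depend on the other.)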
+ +ssm.StringParameter( + stack_under_test, + "forward-function-name", + string_value=stack_under_test.notification_function.function_name, + parameter_name=("/hls/tests/forward-function-name"), +) + +for k, v in dict( + Project="hls", + App="forward-it", +).items(): + Tags.of(app).add(k, v, apply_to_launched_instances=True) + +app.synth() diff --git a/cdk/app.py b/cdk/app_historical.py similarity index 67% rename from cdk/app.py rename to cdk/app_historical.py index a6ccf68..0fd4dae 100755 --- a/cdk/app.py +++ b/cdk/app_historical.py @@ -1,9 +1,9 @@ #!/usr/bin/env python3 import os -from aws_cdk import core as cdk +from aws_cdk import App, Tags -from stacks import HlsLpdaacStack +from stacks import HistoricalNotificationStack # Required environment variables stack_name = os.environ["HLS_LPDAAC_STACK"] @@ -13,10 +13,9 @@ # Optional environment variables managed_policy_name = os.getenv("HLS_LPDAAC_MANAGED_POLICY_NAME") -app = cdk.App() +app = App() -# For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html -HlsLpdaacStack( +HistoricalNotificationStack( app, stack_name, bucket_name=bucket_name, @@ -28,6 +27,6 @@ Project="hls", Stack=stack_name, ).items(): - cdk.Tags.of(app).add(k, v, apply_to_launched_instances=True) + Tags.of(app).add(k, v, apply_to_launched_instances=True) app.synth() diff --git a/cdk/app_historical_it.py b/cdk/app_historical_it.py new file mode 100755 index 0000000..d6f8e00 --- /dev/null +++ b/cdk/app_historical_it.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 +import os + +from aws_cdk import App, Tags +from aws_cdk import aws_ssm as ssm + +from stacks import HistoricalNotificationITStack, HistoricalNotificationStack + +managed_policy_name = os.getenv("HLS_LPDAAC_MANAGED_POLICY_NAME") + +app = App() + +it_stack = HistoricalNotificationITStack( + app, + "hls-historical-it-resources", + managed_policy_name=managed_policy_name, +) +stack_under_test = HistoricalNotificationStack( + app, + "hls-historical-under-test", + bucket_name=it_stack.bucket.bucket_name, + queue_arn=it_stack.queue.queue_arn, + managed_policy_name=managed_policy_name, +) + +# Set SSM Parameter for use within integration tests + +ssm.StringParameter( + stack_under_test, + "historical-function-name", + string_value=stack_under_test.lpdaac_historical_lambda.function_name, + parameter_name=("/hls/tests/historical-function-name"), +) + +for k, v in dict( + Project="hls", + App="historical-it", +).items(): + Tags.of(app).add(k, v, apply_to_launched_instances=True) + +app.synth() diff --git a/cdk/bootstrap-template.yaml b/cdk/bootstrap-template.yaml new file mode 100644 index 0000000..8bd4e66 --- /dev/null +++ b/cdk/bootstrap-template.yaml @@ -0,0 +1,629 @@ +Description: This stack includes resources needed to deploy AWS CDK apps into this environment +Parameters: + TrustedAccounts: + Description: List of AWS accounts that are trusted to publish assets and deploy stacks to this environment + Default: "" + Type: CommaDelimitedList + TrustedAccountsForLookup: + Description: List of AWS accounts that are trusted to look up values in this environment + Default: "" + Type: CommaDelimitedList + CloudFormationExecutionPolicies: + Description: List of the ManagedPolicy ARN(s) to attach to the CloudFormation deployment role + Default: "" + Type: CommaDelimitedList + FileAssetsBucketName: + Description: The name of the S3 bucket used for file assets + Default: "" + Type: String + FileAssetsBucketKmsKeyId: + Description: Empty to create a new key (default), 'AWS_MANAGED_KEY' to use a managed 
S3 key, or the ID/ARN of an existing key. + Default: "" + Type: String + ContainerAssetsRepositoryName: + Description: A user-provided custom name to use for the container assets ECR repository + Default: "" + Type: String + Qualifier: + Description: An identifier to distinguish multiple bootstrap stacks in the same environment + Default: hnb659fds + Type: String + AllowedPattern: "[A-Za-z0-9_-]{1,10}" + ConstraintDescription: Qualifier must be an alphanumeric identifier of at most 10 characters + PublicAccessBlockConfiguration: + Description: Whether or not to enable S3 Staging Bucket Public Access Block Configuration + Default: "true" + Type: String + AllowedValues: + - "true" + - "false" + InputPermissionsBoundary: + Description: Whether or not to use either the CDK supplied or custom permissions boundary + Default: "" + Type: String + UseExamplePermissionsBoundary: + Default: "false" + AllowedValues: + - "true" + - "false" + Type: String + BootstrapVariant: + Type: String + Default: "AWS CDK: Default Resources" + Description: Describe the provenance of the resources in this bootstrap stack. Change this when you customize the template. To prevent accidents, the CDK CLI will not overwrite bootstrap stacks with a different variant. +Conditions: + HasTrustedAccounts: + Fn::Not: + - Fn::Equals: + - "" + - Fn::Join: + - "" + - Ref: TrustedAccounts + HasTrustedAccountsForLookup: + Fn::Not: + - Fn::Equals: + - "" + - Fn::Join: + - "" + - Ref: TrustedAccountsForLookup + HasCloudFormationExecutionPolicies: + Fn::Not: + - Fn::Equals: + - "" + - Fn::Join: + - "" + - Ref: CloudFormationExecutionPolicies + HasCustomFileAssetsBucketName: + Fn::Not: + - Fn::Equals: + - "" + - Ref: FileAssetsBucketName + CreateNewKey: + Fn::Equals: + - "" + - Ref: FileAssetsBucketKmsKeyId + UseAwsManagedKey: + Fn::Equals: + - AWS_MANAGED_KEY + - Ref: FileAssetsBucketKmsKeyId + ShouldCreatePermissionsBoundary: + Fn::Equals: + - "true" + - Ref: UseExamplePermissionsBoundary + PermissionsBoundarySet: + Fn::Not: + - Fn::Equals: + - "" + - Ref: InputPermissionsBoundary + HasCustomContainerAssetsRepositoryName: + Fn::Not: + - Fn::Equals: + - "" + - Ref: ContainerAssetsRepositoryName + UsePublicAccessBlockConfiguration: + Fn::Equals: + - "true" + - Ref: PublicAccessBlockConfiguration +Resources: + FileAssetsBucketEncryptionKey: + Type: AWS::KMS::Key + Properties: + KeyPolicy: + Statement: + - Action: + - kms:Create* + - kms:Describe* + - kms:Enable* + - kms:List* + - kms:Put* + - kms:Update* + - kms:Revoke* + - kms:Disable* + - kms:Get* + - kms:Delete* + - kms:ScheduleKeyDeletion + - kms:CancelKeyDeletion + - kms:GenerateDataKey + - kms:TagResource + - kms:UntagResource + Effect: Allow + Principal: + AWS: + Ref: AWS::AccountId + Resource: "*" + - Action: + - kms:Decrypt + - kms:DescribeKey + - kms:Encrypt + - kms:ReEncrypt* + - kms:GenerateDataKey* + Effect: Allow + Principal: + AWS: "*" + Resource: "*" + Condition: + StringEquals: + kms:CallerAccount: + Ref: AWS::AccountId + kms:ViaService: + - Fn::Sub: s3.${AWS::Region}.amazonaws.com + - Action: + - kms:Decrypt + - kms:DescribeKey + - kms:Encrypt + - kms:ReEncrypt* + - kms:GenerateDataKey* + Effect: Allow + Principal: + AWS: + Fn::Sub: ${FilePublishingRole.Arn} + Resource: "*" + Condition: CreateNewKey + FileAssetsBucketEncryptionKeyAlias: + Condition: CreateNewKey + Type: AWS::KMS::Alias + Properties: + AliasName: + Fn::Sub: alias/cdk-${Qualifier}-assets-key + TargetKeyId: + Ref: FileAssetsBucketEncryptionKey + StagingBucket: + Type: AWS::S3::Bucket + Properties: + 
BucketName: + Fn::If: + - HasCustomFileAssetsBucketName + - Fn::Sub: ${FileAssetsBucketName} + - Fn::Sub: cdk-${Qualifier}-assets-${AWS::AccountId}-${AWS::Region} + AccessControl: Private + BucketEncryption: + ServerSideEncryptionConfiguration: + - ServerSideEncryptionByDefault: + SSEAlgorithm: aws:kms + KMSMasterKeyID: + Fn::If: + - CreateNewKey + - Fn::Sub: ${FileAssetsBucketEncryptionKey.Arn} + - Fn::If: + - UseAwsManagedKey + - Ref: AWS::NoValue + - Fn::Sub: ${FileAssetsBucketKmsKeyId} + VersioningConfiguration: + Status: Enabled + LifecycleConfiguration: + Rules: + - Id: CleanupOldVersions + Status: Enabled + NoncurrentVersionExpiration: + NoncurrentDays: 365 + UpdateReplacePolicy: Retain + DeletionPolicy: Retain + StagingBucketPolicy: + Type: AWS::S3::BucketPolicy + Properties: + Bucket: + Ref: StagingBucket + PolicyDocument: + Id: AccessControl + Version: "2012-10-17" + Statement: + - Sid: AllowSSLRequestsOnly + Action: s3:* + Effect: Deny + Resource: + - Fn::Sub: ${StagingBucket.Arn} + - Fn::Sub: ${StagingBucket.Arn}/* + Condition: + Bool: + aws:SecureTransport: "false" + Principal: "*" + ContainerAssetsRepository: + Type: AWS::ECR::Repository + Properties: + ImageTagMutability: IMMUTABLE + LifecyclePolicy: + LifecyclePolicyText: | + { + "rules": [ + { + "rulePriority": 1, + "description": "Untagged images should not exist, but expire any older than one year", + "selection": { + "tagStatus": "untagged", + "countType": "sinceImagePushed", + "countUnit": "days", + "countNumber": 365 + }, + "action": { "type": "expire" } + } + ] + } + RepositoryName: + Fn::If: + - HasCustomContainerAssetsRepositoryName + - Fn::Sub: ${ContainerAssetsRepositoryName} + - Fn::Sub: cdk-${Qualifier}-container-assets-${AWS::AccountId}-${AWS::Region} + RepositoryPolicyText: + Version: "2012-10-17" + Statement: + - Sid: LambdaECRImageRetrievalPolicy + Effect: Allow + Principal: + Service: lambda.amazonaws.com + Action: + - ecr:BatchGetImage + - ecr:GetDownloadUrlForLayer + Condition: + StringLike: + aws:sourceArn: + Fn::Sub: arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:* + FilePublishingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + AWS: + Ref: AWS::AccountId + - Fn::If: + - HasTrustedAccounts + - Action: sts:AssumeRole + Effect: Allow + Principal: + AWS: + Ref: TrustedAccounts + - Ref: AWS::NoValue + RoleName: + Fn::Sub: cdk-${Qualifier}-file-publishing-role-${AWS::AccountId}-${AWS::Region} + PermissionsBoundary: + Fn::If: + - PermissionsBoundarySet + - Fn::Sub: arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${InputPermissionsBoundary} + - Ref: AWS::NoValue + Tags: + - Key: aws-cdk:bootstrap-role + Value: file-publishing + ImagePublishingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + AWS: + Ref: AWS::AccountId + - Fn::If: + - HasTrustedAccounts + - Action: sts:AssumeRole + Effect: Allow + Principal: + AWS: + Ref: TrustedAccounts + - Ref: AWS::NoValue + RoleName: + Fn::Sub: cdk-${Qualifier}-image-publishing-role-${AWS::AccountId}-${AWS::Region} + PermissionsBoundary: + Fn::If: + - PermissionsBoundarySet + - Fn::Sub: arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${InputPermissionsBoundary} + - Ref: AWS::NoValue + Tags: + - Key: aws-cdk:bootstrap-role + Value: image-publishing + LookupRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: 
sts:AssumeRole + Effect: Allow + Principal: + AWS: + Ref: AWS::AccountId + - Fn::If: + - HasTrustedAccountsForLookup + - Action: sts:AssumeRole + Effect: Allow + Principal: + AWS: + Ref: TrustedAccountsForLookup + - Ref: AWS::NoValue + - Fn::If: + - HasTrustedAccounts + - Action: sts:AssumeRole + Effect: Allow + Principal: + AWS: + Ref: TrustedAccounts + - Ref: AWS::NoValue + RoleName: + Fn::Sub: cdk-${Qualifier}-lookup-role-${AWS::AccountId}-${AWS::Region} + PermissionsBoundary: + Fn::If: + - PermissionsBoundarySet + - Fn::Sub: arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${InputPermissionsBoundary} + - Ref: AWS::NoValue + ManagedPolicyArns: + - Fn::Sub: arn:${AWS::Partition}:iam::aws:policy/ReadOnlyAccess + Policies: + - PolicyDocument: + Statement: + - Sid: DontReadSecrets + Effect: Deny + Action: + - kms:Decrypt + Resource: "*" + Version: "2012-10-17" + PolicyName: LookupRolePolicy + Tags: + - Key: aws-cdk:bootstrap-role + Value: lookup + FilePublishingRoleDefaultPolicy: + Type: AWS::IAM::Policy + Properties: + PolicyDocument: + Statement: + - Action: + - s3:GetObject* + - s3:GetBucket* + - s3:GetEncryptionConfiguration + - s3:List* + - s3:DeleteObject* + - s3:PutObject* + - s3:Abort* + Resource: + - Fn::Sub: ${StagingBucket.Arn} + - Fn::Sub: ${StagingBucket.Arn}/* + Effect: Allow + - Action: + - kms:Decrypt + - kms:DescribeKey + - kms:Encrypt + - kms:ReEncrypt* + - kms:GenerateDataKey* + Effect: Allow + Resource: + Fn::If: + - CreateNewKey + - Fn::Sub: ${FileAssetsBucketEncryptionKey.Arn} + - Fn::Sub: arn:${AWS::Partition}:kms:${AWS::Region}:${AWS::AccountId}:key/${FileAssetsBucketKmsKeyId} + Version: "2012-10-17" + Roles: + - Ref: FilePublishingRole + PolicyName: + Fn::Sub: cdk-${Qualifier}-file-publishing-role-default-policy-${AWS::AccountId}-${AWS::Region} + ImagePublishingRoleDefaultPolicy: + Type: AWS::IAM::Policy + Properties: + PolicyDocument: + Statement: + - Action: + - ecr:PutImage + - ecr:InitiateLayerUpload + - ecr:UploadLayerPart + - ecr:CompleteLayerUpload + - ecr:BatchCheckLayerAvailability + - ecr:DescribeRepositories + - ecr:DescribeImages + - ecr:BatchGetImage + - ecr:GetDownloadUrlForLayer + Resource: + Fn::Sub: ${ContainerAssetsRepository.Arn} + Effect: Allow + - Action: + - ecr:GetAuthorizationToken + Resource: "*" + Effect: Allow + Version: "2012-10-17" + Roles: + - Ref: ImagePublishingRole + PolicyName: + Fn::Sub: cdk-${Qualifier}-image-publishing-role-default-policy-${AWS::AccountId}-${AWS::Region} + DeploymentActionRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + AWS: + Ref: AWS::AccountId + - Fn::If: + - HasTrustedAccounts + - Action: sts:AssumeRole + Effect: Allow + Principal: + AWS: + Ref: TrustedAccounts + - Ref: AWS::NoValue + Policies: + - PolicyDocument: + Statement: + - Sid: CloudFormationPermissions + Effect: Allow + Action: + - cloudformation:CreateChangeSet + - cloudformation:DeleteChangeSet + - cloudformation:DescribeChangeSet + - cloudformation:DescribeStacks + - cloudformation:ExecuteChangeSet + - cloudformation:CreateStack + - cloudformation:UpdateStack + Resource: "*" + - Sid: PipelineCrossAccountArtifactsBucket + Effect: Allow + Action: + - s3:GetObject* + - s3:GetBucket* + - s3:List* + - s3:Abort* + - s3:DeleteObject* + - s3:PutObject* + Resource: "*" + Condition: + StringNotEquals: + s3:ResourceAccount: + Ref: AWS::AccountId + - Sid: PipelineCrossAccountArtifactsKey + Effect: Allow + Action: + - kms:Decrypt + - kms:DescribeKey + - kms:Encrypt 
+ - kms:ReEncrypt* + - kms:GenerateDataKey* + Resource: "*" + Condition: + StringEquals: + kms:ViaService: + Fn::Sub: s3.${AWS::Region}.amazonaws.com + - Action: iam:PassRole + Resource: + Fn::Sub: ${CloudFormationExecutionRole.Arn} + Effect: Allow + - Sid: CliPermissions + Action: + - cloudformation:DescribeStackEvents + - cloudformation:GetTemplate + - cloudformation:DeleteStack + - cloudformation:UpdateTerminationProtection + - sts:GetCallerIdentity + - cloudformation:GetTemplateSummary + Resource: "*" + Effect: Allow + - Sid: CliStagingBucket + Effect: Allow + Action: + - s3:GetObject* + - s3:GetBucket* + - s3:List* + Resource: + - Fn::Sub: ${StagingBucket.Arn} + - Fn::Sub: ${StagingBucket.Arn}/* + - Sid: ReadVersion + Effect: Allow + Action: + - ssm:GetParameter + - ssm:GetParameters + Resource: + - Fn::Sub: arn:${AWS::Partition}:ssm:${AWS::Region}:${AWS::AccountId}:parameter${CdkBootstrapVersion} + Version: "2012-10-17" + PolicyName: default + RoleName: + Fn::Sub: cdk-${Qualifier}-deploy-role-${AWS::AccountId}-${AWS::Region} + PermissionsBoundary: + Fn::If: + - PermissionsBoundarySet + - Fn::Sub: arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${InputPermissionsBoundary} + - Ref: AWS::NoValue + Tags: + - Key: aws-cdk:bootstrap-role + Value: deploy + CloudFormationExecutionRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + Service: cloudformation.amazonaws.com + Version: "2012-10-17" + ManagedPolicyArns: + Fn::If: + - HasCloudFormationExecutionPolicies + - Ref: CloudFormationExecutionPolicies + - Fn::If: + - HasTrustedAccounts + - Ref: AWS::NoValue + - - Fn::Sub: arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess + RoleName: + Fn::Sub: cdk-${Qualifier}-cfn-exec-role-${AWS::AccountId}-${AWS::Region} + PermissionsBoundary: + Fn::If: + - PermissionsBoundarySet + - Fn::Sub: arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${InputPermissionsBoundary} + - Ref: AWS::NoValue + CdkBoostrapPermissionsBoundaryPolicy: + Condition: ShouldCreatePermissionsBoundary + Type: AWS::IAM::ManagedPolicy + Properties: + PolicyDocument: + Statement: + - Sid: ExplicitAllowAll + Action: + - "*" + Effect: Allow + Resource: "*" + - Sid: DenyAccessIfRequiredPermBoundaryIsNotBeingApplied + Action: + - iam:CreateUser + - iam:CreateRole + - iam:PutRolePermissionsBoundary + - iam:PutUserPermissionsBoundary + Condition: + StringNotEquals: + iam:PermissionsBoundary: + Fn::Sub: arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/cdk-${Qualifier}-permissions-boundary-${AWS::AccountId}-${AWS::Region} + Effect: Deny + Resource: "*" + - Sid: DenyPermBoundaryIAMPolicyAlteration + Action: + - iam:CreatePolicyVersion + - iam:DeletePolicy + - iam:DeletePolicyVersion + - iam:SetDefaultPolicyVersion + Effect: Deny + Resource: + Fn::Sub: arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/cdk-${Qualifier}-permissions-boundary-${AWS::AccountId}-${AWS::Region} + - Sid: DenyRemovalOfPermBoundaryFromAnyUserOrRole + Action: + - iam:DeleteUserPermissionsBoundary + - iam:DeleteRolePermissionsBoundary + Effect: Deny + Resource: "*" + Version: "2012-10-17" + Description: Bootstrap Permission Boundary + ManagedPolicyName: + Fn::Sub: cdk-${Qualifier}-permissions-boundary-${AWS::AccountId}-${AWS::Region} + Path: / + CdkBootstrapVersion: + Type: AWS::SSM::Parameter + Properties: + Type: String + Name: + Fn::Sub: /cdk-bootstrap/${Qualifier}/version + Value: "20" +Outputs: + BucketName: + Description: The name of the S3 bucket owned by the 
CDK toolkit stack + Value: + Fn::Sub: ${StagingBucket} + BucketDomainName: + Description: The domain name of the S3 bucket owned by the CDK toolkit stack + Value: + Fn::Sub: ${StagingBucket.RegionalDomainName} + FileAssetKeyArn: + Description: The ARN of the KMS key used to encrypt the asset bucket (deprecated) + Value: + Fn::If: + - CreateNewKey + - Fn::Sub: ${FileAssetsBucketEncryptionKey.Arn} + - Fn::Sub: ${FileAssetsBucketKmsKeyId} + Export: + Name: + Fn::Sub: CdkBootstrap-${Qualifier}-FileAssetKeyArn + ImageRepositoryName: + Description: The name of the ECR repository which hosts docker image assets + Value: + Fn::Sub: ${ContainerAssetsRepository} + BootstrapVersion: + Description: The version of the bootstrap resources that are currently mastered in this stack + Value: + Fn::GetAtt: + - CdkBootstrapVersion + - Value diff --git a/cdk/bootstrap.py b/cdk/bootstrap.py new file mode 100644 index 0000000..ea20b76 --- /dev/null +++ b/cdk/bootstrap.py @@ -0,0 +1,2 @@ +# Dummy app to bootstrap the CDK. This allows us to create/update the bootstrap +# stack without having to deploy any apps. diff --git a/cdk/stacks/__init__.py b/cdk/stacks/__init__.py index 8f50ea8..233b2f8 100644 --- a/cdk/stacks/__init__.py +++ b/cdk/stacks/__init__.py @@ -1,7 +1,13 @@ -from .hls_lpdaac_stack import HlsLpdaacStack -from .hls_lpdaac_stack_ci import HlsLpdaacIntegrationStack +from .forward_notification import NotificationStack as ForwardNotificationStack +from .forward_notification_it import NotificationITStack as ForwardNotificationITStack +from .historical_notification import NotificationStack as HistoricalNotificationStack +from .historical_notification_it import ( + NotificationITStack as HistoricalNotificationITStack, +) __all__ = [ - "HlsLpdaacStack", - "HlsLpdaacIntegrationStack", + "ForwardNotificationStack", + "ForwardNotificationITStack", + "HistoricalNotificationStack", + "HistoricalNotificationITStack", ] diff --git a/cdk/stacks/forward_notification.py b/cdk/stacks/forward_notification.py new file mode 100644 index 0000000..56d74cc --- /dev/null +++ b/cdk/stacks/forward_notification.py @@ -0,0 +1,67 @@ +from typing import Optional, Union + +from aws_cdk import Duration, Stack +from aws_cdk import aws_iam as iam +from aws_cdk import aws_lambda as lambda_ +from aws_cdk import aws_s3 as s3 +from aws_cdk import aws_s3_notifications as s3n +from aws_cdk import aws_sqs as sqs +from constructs import Construct + + +class NotificationStack(Stack): + def __init__( + self, + scope: Construct, + stack_name: str, + *, + bucket_name: str, + lpdaac_queue_arn: str, + tiler_queue_arn: Optional[str] = None, + managed_policy_name: Optional[str] = None, + ) -> None: + super().__init__(scope, stack_name) + + if managed_policy_name: + iam.PermissionsBoundary.of(self).apply( + iam.ManagedPolicy.from_managed_policy_name( + self, + "PermissionsBoundary", + managed_policy_name, + ) + ) + + # Define resources + + self.bucket = s3.Bucket.from_bucket_name(self, "hls-output", bucket_name) + self.lpdaac_queue = sqs.Queue.from_queue_arn( + self, "lpdaac", queue_arn=lpdaac_queue_arn + ) + self.tiler_queue: Union[sqs.Queue, sqs.IQueue] = ( + sqs.Queue(self, "tiler", retention_period=Duration.minutes(5)) + if tiler_queue_arn is None + else sqs.Queue.from_queue_arn(self, "tiler", queue_arn=tiler_queue_arn) + ) + self.notification_function = lambda_.Function( + self, + "ForwardNotifier", + code=lambda_.Code.from_asset("src/hls_lpdaac/forward"), + handler="index.handler", + runtime=lambda_.Runtime.PYTHON_3_9, # type: ignore + 
memory_size=128, + timeout=Duration.seconds(30), + environment=dict( + LPDAAC_QUEUE_URL=self.lpdaac_queue.queue_url, + TILER_QUEUE_URL=self.tiler_queue.queue_url, + ), + ) + + # Wire everything up + + self.lpdaac_queue.grant_send_messages(self.notification_function) + self.tiler_queue.grant_send_messages(self.notification_function) + self.bucket.grant_read(self.notification_function) + self.bucket.add_object_created_notification( + s3n.LambdaDestination(self.notification_function), # type: ignore + s3.NotificationKeyFilter(suffix=".v2.0.json"), + ) diff --git a/cdk/stacks/forward_notification_it.py b/cdk/stacks/forward_notification_it.py new file mode 100644 index 0000000..7fe7d83 --- /dev/null +++ b/cdk/stacks/forward_notification_it.py @@ -0,0 +1,60 @@ +from typing import Optional + +from aws_cdk import RemovalPolicy, Stack +from aws_cdk import aws_iam as iam +from aws_cdk import aws_s3 as s3 +from aws_cdk import aws_sqs as sqs +from aws_cdk import aws_ssm as ssm +from constructs import Construct + + +class NotificationITStack(Stack): + def __init__( + self, + scope: Construct, + id: str, + *, + managed_policy_name: Optional[str] = None, + ) -> None: + super().__init__(scope, id) + + if managed_policy_name: + iam.PermissionsBoundary.of(self).apply( + iam.ManagedPolicy.from_managed_policy_name( + self, + "PermissionsBoundary", + managed_policy_name, + ) + ) + + self.bucket = s3.Bucket( + self, + "test-bucket", + removal_policy=RemovalPolicy.DESTROY, + auto_delete_objects=True, + ) + self.forward_queue = sqs.Queue(self, "forward-queue") + self.tiler_queue = sqs.Queue(self, "tiler-queue") + + # Set SSM Parameters for use within integration tests + + ssm.StringParameter( + self, + "bucket-name", + string_value=self.bucket.bucket_name, + parameter_name="/hls/tests/forward-bucket-name", + ) + + ssm.StringParameter( + self, + "forward-queue-name", + string_value=self.forward_queue.queue_name, + parameter_name="/hls/tests/forward-queue-name", + ) + + ssm.StringParameter( + self, + "tiler-queue-name", + string_value=self.tiler_queue.queue_name, + parameter_name="/hls/tests/tiler-queue-name", + ) diff --git a/cdk/stacks/hls_lpdaac_stack.py b/cdk/stacks/historical_notification.py similarity index 78% rename from cdk/stacks/hls_lpdaac_stack.py rename to cdk/stacks/historical_notification.py index 5fa8f8d..e2ec82a 100644 --- a/cdk/stacks/hls_lpdaac_stack.py +++ b/cdk/stacks/historical_notification.py @@ -1,17 +1,18 @@ from typing import Optional +from aws_cdk import Duration, Stack from aws_cdk import aws_iam as iam from aws_cdk import aws_lambda as lambda_ from aws_cdk import aws_s3 as s3 from aws_cdk import aws_s3_notifications as s3n from aws_cdk import aws_sqs as sqs -from aws_cdk import core as cdk +from constructs import Construct -class HlsLpdaacStack(cdk.Stack): +class NotificationStack(Stack): def __init__( self, - scope: cdk.Construct, + scope: Construct, stack_name: str, *, bucket_name: str, @@ -21,36 +22,34 @@ def __init__( super().__init__(scope, stack_name) if managed_policy_name: - account_id = iam.AccountRootPrincipal().account_id - iam.PermissionsBoundary.of(self).apply( - iam.ManagedPolicy.from_managed_policy_arn( + iam.ManagedPolicy.from_managed_policy_name( self, "PermissionsBoundary", - f"arn:aws:iam::{account_id}:policy/{managed_policy_name}", + managed_policy_name, ) ) self.lpdaac_historical_bucket = s3.Bucket.from_bucket_name( self, - "LpdaacHistoricalBucket", + "HistoricalBucket", bucket_name, ) self.lpdaac_historical_queue = sqs.Queue.from_queue_arn( self, - 
"LpdaacHistoricalQueue", + "HistoricalQueue", queue_arn=queue_arn, ) self.lpdaac_historical_lambda = lambda_.Function( self, - "LpdaacHistoricalLambda", + "HistoricalLambda", code=lambda_.Code.from_asset("src/hls_lpdaac/historical"), handler="index.handler", runtime=lambda_.Runtime.PYTHON_3_9, # type: ignore memory_size=128, - timeout=cdk.Duration.seconds(30), + timeout=Duration.seconds(30), environment=dict(QUEUE_URL=self.lpdaac_historical_queue.queue_url), ) diff --git a/cdk/stacks/historical_notification_it.py b/cdk/stacks/historical_notification_it.py new file mode 100644 index 0000000..f655e1d --- /dev/null +++ b/cdk/stacks/historical_notification_it.py @@ -0,0 +1,52 @@ +from typing import Optional + +from aws_cdk import RemovalPolicy, Stack +from aws_cdk import aws_iam as iam +from aws_cdk import aws_s3 as s3 +from aws_cdk import aws_sqs as sqs +from aws_cdk import aws_ssm as ssm +from constructs import Construct + + +class NotificationITStack(Stack): + def __init__( + self, + scope: Construct, + id: str, + *, + managed_policy_name: Optional[str] = None, + ) -> None: + super().__init__(scope, id) + + if managed_policy_name: + iam.PermissionsBoundary.of(self).apply( + iam.ManagedPolicy.from_managed_policy_name( + self, + "PermissionsBoundary", + managed_policy_name, + ) + ) + + self.bucket = s3.Bucket( + self, + "test-bucket", + removal_policy=RemovalPolicy.DESTROY, + auto_delete_objects=True, + ) + self.queue = sqs.Queue(self, "test-queue") + + # Set SSM Parameters for use within integration tests + + ssm.StringParameter( + self, + "bucket-name", + string_value=self.bucket.bucket_name, + parameter_name="/hls/tests/historical-bucket-name", + ) + + ssm.StringParameter( + self, + "queue-name", + string_value=self.queue.queue_name, + parameter_name="/hls/tests/historical-queue-name", + ) diff --git a/cdk/stacks/hls_lpdaac_stack_ci.py b/cdk/stacks/hls_lpdaac_stack_ci.py deleted file mode 100644 index 8914ec1..0000000 --- a/cdk/stacks/hls_lpdaac_stack_ci.py +++ /dev/null @@ -1,28 +0,0 @@ -from aws_cdk import aws_s3 as s3 -from aws_cdk import aws_sqs as sqs -from aws_cdk import aws_ssm as ssm -from aws_cdk import core as cdk - - -class HlsLpdaacIntegrationStack(cdk.Stack): - def __init__(self, scope: cdk.Construct, id: str) -> None: - super().__init__(scope, id) - - self.bucket = s3.Bucket(self, "test-bucket") - self.queue = sqs.Queue(self, "test-queue") - - # Set SSM Parameters for use within integration tests - - ssm.StringParameter( - self, - "bucket_name", - string_value=self.bucket.bucket_name, - parameter_name=("/tests/bucket_name"), - ) - - ssm.StringParameter( - self, - "queue_name", - string_value=self.queue.queue_name, - parameter_name=("/tests/queue_name"), - ) diff --git a/setup.py b/setup.py index a850464..0f1f91d 100644 --- a/setup.py +++ b/setup.py @@ -1,46 +1,33 @@ -from typing import List - from setuptools import find_packages, setup # type: ignore aws_cdk_extras = [ - f"aws_cdk.{aws_cdk_package}<2" - for aws_cdk_package in [ - "core", - "assertions", - "aws-events", - "aws-events-targets", - "aws-iam", - "aws-lambda", - "aws-lambda-python", - "aws-s3", - "aws-s3-notifications", - "aws-sqs", - ] + "aws-cdk-lib>=2.0.0", + "constructs>=10.0.0", ] -install_requires: List[str] = [] +install_requires: list[str] = [] extras_require_test = [ *aws_cdk_extras, - "flake8", - "black", - "boto3", - "moto[s3,sqs]", - "pytest-cov", - "pytest", + "flake8~=7.0", + "black~=24.1", + "boto3~=1.34", + "moto[s3,sqs]~=4.0", + "pytest-cov~=4.1", + "pytest~=8.0", ] extras_require_dev = [ 
*extras_require_test, - "aws_lambda_typing", - "boto3-stubs[iam,lambda,s3,sqs,ssm]", - "botocore-stubs", - "isort", - "mypy", - "nodeenv", - "pre-commit", - "pre-commit-hooks", - "pyright", + "aws_lambda_typing~=2.18", + "boto3-stubs[iam,lambda,s3,sqs,ssm]~=1.34", + "botocore-stubs~=1.34", + "isort~=5.13", + "mypy~=1.8", + "nodeenv~=1.8", + "pre-commit~=3.6", + "pre-commit-hooks~=4.5", + "pyright~=1.1", ] extras_require = { diff --git a/src/hls_lpdaac/forward/__init__.py b/src/hls_lpdaac/forward/__init__.py new file mode 100644 index 0000000..6c0befd --- /dev/null +++ b/src/hls_lpdaac/forward/__init__.py @@ -0,0 +1,3 @@ +from .index import _handler + +__all__ = ["_handler"] diff --git a/src/hls_lpdaac/forward/index.py b/src/hls_lpdaac/forward/index.py new file mode 100644 index 0000000..e088fd8 --- /dev/null +++ b/src/hls_lpdaac/forward/index.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +import os +from typing import TYPE_CHECKING + +import boto3 + +if TYPE_CHECKING: # pragma: no cover + from aws_lambda_typing.context import Context + from aws_lambda_typing.events import S3Event + +s3 = boto3.resource("s3") + + +def handler(event: "S3Event", _: "Context") -> None: + return _handler( + event, + lpdaac_queue_url=os.environ["LPDAAC_QUEUE_URL"], + tiler_queue_url=os.environ["TILER_QUEUE_URL"], + ) + + +# Enables unit testing without the need to monkeypatch `os.environ` (which would +# be necessary to test `handler` above). +def _handler(event: "S3Event", *, lpdaac_queue_url: str, tiler_queue_url: str) -> None: + # The S3Event type is not quite correct, so we are forced to ignore a couple + # of typing errors that would not occur if the type were defined correctly. + s3_object = event["Records"][0]["s3"] # type: ignore + bucket = s3_object["bucket"]["name"] + + json_key = s3_object["object"]["key"] # type: ignore + json_contents = s3.Object(bucket, json_key).get()["Body"].read().decode("utf-8") + _send_message(lpdaac_queue_url, key=json_key, message=json_contents) + + stac_json_key = json_key.replace(".json", "_stac.json") + stac_url = f"s3://{bucket}/{stac_json_key}" + _send_message(tiler_queue_url, key=stac_json_key, message=stac_url) + + +def _send_message(queue_url: str, *, key: str, message: str) -> None: + region_name = queue_url.split(".")[1] + sqs = boto3.client("sqs", region_name=region_name) + response = sqs.send_message(QueueUrl=queue_url, MessageBody=message) + status_code = response["ResponseMetadata"]["HTTPStatusCode"] + print(f"Status Code - {status_code} - {key}") diff --git a/src/hls_lpdaac/historical/index.py b/src/hls_lpdaac/historical/index.py index 4db02c4..d4199c9 100644 --- a/src/hls_lpdaac/historical/index.py +++ b/src/hls_lpdaac/historical/index.py @@ -3,7 +3,7 @@ import boto3 -if TYPE_CHECKING: +if TYPE_CHECKING: # pragma: no cover from aws_lambda_typing.context import Context from aws_lambda_typing.events import S3Event @@ -25,11 +25,7 @@ def _handler(event: "S3Event", queue_url: str) -> None: message = s3.Object(bucket, key).get()["Body"].read().decode("utf-8") - # To support testing, because mocked queue URLs do NOT contain a region (e.g., - # https://queue.amazonaws.com//). Setting region_name to - # None just forces use of default region for mock queue URLs. 
- region_name = None if (name := queue_url.split(".")[1]) == "amazonaws" else name - + region_name = queue_url.split(".")[1] sqs = boto3.client("sqs", region_name=region_name) response = sqs.send_message(QueueUrl=queue_url, MessageBody=message) status_code = response["ResponseMetadata"]["HTTPStatusCode"] diff --git a/tests/integration/test_forward_lambda.py b/tests/integration/test_forward_lambda.py new file mode 100644 index 0000000..0e8b519 --- /dev/null +++ b/tests/integration/test_forward_lambda.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from mypy_boto3_lambda import LambdaClient + from mypy_boto3_s3 import S3ServiceResource + from mypy_boto3_sqs import SQSServiceResource + from mypy_boto3_ssm import SSMClient + + +def test_notification( + lambda_: LambdaClient, + s3: S3ServiceResource, + sqs: SQSServiceResource, + ssm: SSMClient, +) -> None: + # Get source bucket + bucket_name = ssm_param_value(ssm, "/hls/tests/forward-bucket-name") + bucket = s3.Bucket(bucket_name) + + # Get forward notification queue + forward_queue_name = ssm_param_value(ssm, "/hls/tests/forward-queue-name") + forward_queue = sqs.get_queue_by_name(QueueName=forward_queue_name) + + # Get tiler queue + tiler_queue_name = ssm_param_value(ssm, "/hls/tests/tiler-queue-name") + tiler_queue = sqs.get_queue_by_name(QueueName=tiler_queue_name) + + # Write S3 Object with .v2.0.json suffix to source bucket to trigger notification. + body = '{ "greeting": "hello world!" }' + json_key = "greeting.v2.0.json" + obj = bucket.Object(json_key) + obj.put(Body=body) + obj.wait_until_exists() + + try: + # Wait for lambda function to succeed, which should be triggered by S3 + # notification of object created in bucket above. + name = ssm_param_value(ssm, "/hls/tests/forward-function-name") + waiter = lambda_.get_waiter("function_active_v2") + waiter.wait(FunctionName=name, WaiterConfig={"Delay": 5, "MaxAttempts": 20}) + + # Receive message from destination queue, which should be sent by Lambda + # function above. + forward_messages = forward_queue.receive_messages( + MaxNumberOfMessages=10, WaitTimeSeconds=20 + ) + tiler_messages = tiler_queue.receive_messages( + MaxNumberOfMessages=10, WaitTimeSeconds=20 + ) + finally: + # Cleanup S3 Object with .v2.0.json suffix from source bucket. 
diff --git a/tests/integration/test_forward_lambda.py b/tests/integration/test_forward_lambda.py
new file mode 100644
index 0000000..0e8b519
--- /dev/null
+++ b/tests/integration/test_forward_lambda.py
@@ -0,0 +1,73 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from mypy_boto3_lambda import LambdaClient
+    from mypy_boto3_s3 import S3ServiceResource
+    from mypy_boto3_sqs import SQSServiceResource
+    from mypy_boto3_ssm import SSMClient
+
+
+def test_notification(
+    lambda_: LambdaClient,
+    s3: S3ServiceResource,
+    sqs: SQSServiceResource,
+    ssm: SSMClient,
+) -> None:
+    # Get source bucket
+    bucket_name = ssm_param_value(ssm, "/hls/tests/forward-bucket-name")
+    bucket = s3.Bucket(bucket_name)
+
+    # Get forward notification queue
+    forward_queue_name = ssm_param_value(ssm, "/hls/tests/forward-queue-name")
+    forward_queue = sqs.get_queue_by_name(QueueName=forward_queue_name)
+
+    # Get tiler queue
+    tiler_queue_name = ssm_param_value(ssm, "/hls/tests/tiler-queue-name")
+    tiler_queue = sqs.get_queue_by_name(QueueName=tiler_queue_name)
+
+    # Write S3 Object with .v2.0.json suffix to source bucket to trigger notification.
+    body = '{ "greeting": "hello world!" }'
+    json_key = "greeting.v2.0.json"
+    obj = bucket.Object(json_key)
+    obj.put(Body=body)
+    obj.wait_until_exists()
+
+    try:
+        # Wait for the lambda function to become active; it should be triggered
+        # by the S3 notification for the object created in the bucket above.
+        name = ssm_param_value(ssm, "/hls/tests/forward-function-name")
+        waiter = lambda_.get_waiter("function_active_v2")
+        waiter.wait(FunctionName=name, WaiterConfig={"Delay": 5, "MaxAttempts": 20})
+
+        # Receive messages from both destination queues; they should have been
+        # sent by the Lambda function above.
+        forward_messages = forward_queue.receive_messages(
+            MaxNumberOfMessages=10, WaitTimeSeconds=20
+        )
+        tiler_messages = tiler_queue.receive_messages(
+            MaxNumberOfMessages=10, WaitTimeSeconds=20
+        )
+    finally:
+        # Cleanup S3 Object with .v2.0.json suffix from source bucket.
+        obj.delete()
+        obj.wait_until_not_exists()
+
+    # Assert forward message contents == S3 Object contents (written above)
+    assert len(forward_messages) == 1
+    assert forward_messages[0].body == body
+
+    # Assert tiler message contents == S3 URL of the corresponding STAC object
+    assert len(tiler_messages) == 1
+    assert (
+        tiler_messages[0].body
+        == f"s3://{bucket_name}/{json_key.replace('.json', '_stac.json')}"
+    )
+
+
+def ssm_param_value(ssm: SSMClient, name: str) -> str:
+    value = ssm.get_parameter(Name=name)["Parameter"].get("Value")
+    assert value is not None  # make type checker happy
+
+    return value
diff --git a/tests/integration/test_historical_lambda.py b/tests/integration/test_historical_lambda.py
index 10b4e3e..7932c09 100644
--- a/tests/integration/test_historical_lambda.py
+++ b/tests/integration/test_historical_lambda.py
@@ -5,9 +5,7 @@ if TYPE_CHECKING:
     from mypy_boto3_lambda import LambdaClient
     from mypy_boto3_s3 import S3ServiceResource
-    from mypy_boto3_s3.service_resource import Bucket
     from mypy_boto3_sqs import SQSServiceResource
-    from mypy_boto3_sqs.service_resource import Queue
     from mypy_boto3_ssm import SSMClient
@@ -18,26 +16,23 @@ def test_notification(
     ssm: SSMClient,
 ) -> None:
     # Get source bucket
-    bucket_name = ssm.get_parameter(Name="/tests/bucket_name")["Parameter"].get("Value")
-    assert bucket_name is not None
-    bucket: "Bucket" = s3.Bucket(bucket_name)
+    bucket_name = ssm_param_value(ssm, "/hls/tests/historical-bucket-name")
+    bucket = s3.Bucket(bucket_name)
 
     # Get destination queue
-    queue_name = ssm.get_parameter(Name="/tests/queue_name")["Parameter"].get("Value")
-    assert queue_name is not None
-    queue: "Queue" = sqs.get_queue_by_name(QueueName=queue_name)
+    queue_name = ssm_param_value(ssm, "/hls/tests/historical-queue-name")
+    queue = sqs.get_queue_by_name(QueueName=queue_name)
 
     # Write S3 Object with .v2.0.json suffix to source bucket to trigger notification.
     body = '{ "greeting": "hello world!" }'
-    object = bucket.Object("greeting.v2.0.json")
-    object.put(Body=body)
-    object.wait_until_exists()
+    obj = bucket.Object("greeting.v2.0.json")
+    obj.put(Body=body)
+    obj.wait_until_exists()
 
     try:
         # Wait for lambda function to succeed, which should be triggered by S3
         # notification of object created in bucket above.
-        name = ssm.get_parameter(Name="/tests/function_name")["Parameter"].get("Value")
-        assert name is not None
+        name = ssm_param_value(ssm, "/hls/tests/historical-function-name")
         waiter = lambda_.get_waiter("function_active_v2")
         waiter.wait(FunctionName=name, WaiterConfig={"Delay": 5, "MaxAttempts": 20})
@@ -46,9 +41,16 @@ def test_notification(
         messages = queue.receive_messages(WaitTimeSeconds=20)
     finally:
         # Cleanup S3 Object with .v2.0.json suffix from source bucket.
-        object.delete()
-        object.wait_until_not_exists()
+        obj.delete()
+        obj.wait_until_not_exists()
 
     # Assert message contents == S3 Object contents (written above)
     assert len(messages) == 1
     assert messages[0].body == body
+
+
+def ssm_param_value(ssm: SSMClient, name: str) -> str:
+    value = ssm.get_parameter(Name=name)["Parameter"].get("Value")
+    assert value is not None  # make type checker happy
+
+    return value
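Note: both integration tests follow the same trigger-and-clean-up shape. Stripped to its skeleton, with placeholder bucket and key names rather than values read from SSM, it looks like this:

import boto3

s3 = boto3.resource("s3")
obj = s3.Bucket("my-bucket").Object("greeting.v2.0.json")
obj.put(Body='{ "greeting": "hello world!" }')
obj.wait_until_exists()  # the object is in place; S3 then emits the event
try:
    ...  # wait on the Lambda and drain the destination queue(s) here
finally:
    # Delete the trigger object even if the steps above fail, so repeated
    # test runs start from an empty bucket.
    obj.delete()
    obj.wait_until_not_exists()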
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index d558c1e..67aa106 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -3,7 +3,7 @@
 
 import boto3
 import pytest
-from moto import mock_s3, mock_sqs  # type: ignore
+from moto import mock_s3, mock_sqs
 
 if TYPE_CHECKING:
     from aws_lambda_typing.events import S3Event
@@ -39,7 +39,7 @@ def s3_bucket(s3: "S3ServiceResource") -> "Bucket":
 
 @pytest.fixture(scope="function")
 def s3_object(s3_bucket: "Bucket") -> "Object":
-    return s3_bucket.put_object(Key="myobject", Body=bytes("test", "utf-8"))
+    return s3_bucket.put_object(Key="myobject.v2.json", Body=bytes("test", "utf-8"))
 
 
 @pytest.fixture(scope="function")
diff --git a/tests/unit/test_forward_handler.py b/tests/unit/test_forward_handler.py
new file mode 100644
index 0000000..a30e4be
--- /dev/null
+++ b/tests/unit/test_forward_handler.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from aws_lambda_typing.events import S3Event
+    from mypy_boto3_s3.service_resource import Object
+    from mypy_boto3_sqs.service_resource import Queue
+
+
+def test_lpdaac_forward_handler(
+    s3_event: S3Event,
+    s3_object: Object,
+    sqs_queue: Queue,
+) -> None:
+    # Import here (rather than at top level) to ensure AWS mocks are established.
+    # See http://docs.getmoto.org/en/latest/docs/getting_started.html#what-about-those-pesky-imports
+    from hls_lpdaac.forward import _handler
+
+    _handler(s3_event, lpdaac_queue_url=sqs_queue.url, tiler_queue_url=sqs_queue.url)
+
+    bucket = s3_event["Records"][0]["s3"]["bucket"]["name"]  # type: ignore
+    key = s3_event["Records"][0]["s3"]["object"]["key"]  # type: ignore
+    messages = [
+        message.body for message in sqs_queue.receive_messages(MaxNumberOfMessages=10)
+    ]
+    expected_messages = [
+        s3_object.get()["Body"].read().decode("utf-8"),
+        f"s3://{bucket}/{key.replace('.json', '_stac.json')}",
+    ]
+
+    assert messages == expected_messages
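Note: the in-test import of `_handler` above follows the moto guidance linked in the comment. The general shape, sketched here with moto's 4.x decorator API rather than this repo's fixtures:

from moto import mock_s3

@mock_s3
def test_something() -> None:
    # Import the code under test only after the mock is active, so that the
    # module-level boto3 resource it creates (s3 = boto3.resource("s3")) is
    # mocked as well.
    from hls_lpdaac.forward import _handler
    ...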
diff --git a/tests/unit/test_forward_stack.py b/tests/unit/test_forward_stack.py
new file mode 100644
index 0000000..e4fad87
--- /dev/null
+++ b/tests/unit/test_forward_stack.py
@@ -0,0 +1,48 @@
+from typing import TYPE_CHECKING
+from urllib.parse import urlparse
+
+from aws_cdk import App
+from aws_cdk.assertions import Match, Template
+
+from cdk.stacks import ForwardNotificationStack
+
+if TYPE_CHECKING:
+    from mypy_boto3_s3.service_resource import Bucket
+    from mypy_boto3_sqs.service_resource import Queue
+
+
+def test_lambda_environment(s3_bucket: "Bucket", sqs_queue: "Queue"):
+    app = App()
+    stack = ForwardNotificationStack(
+        app,
+        "forward-notification",
+        bucket_name=s3_bucket.name,
+        lpdaac_queue_arn=sqs_queue.attributes["QueueArn"],
+        tiler_queue_arn=sqs_queue.attributes["QueueArn"],
+    )
+
+    template = Template.from_stack(stack)
+
+    # This is ugly, but is currently necessary until (if ever) the CDK provides
+    # a more convenient means for matching expected values against unresolved
+    # Cfn intrinsic functions. In this case, the queue URL is not a string
+    # value, but rather an unresolved occurrence of the Fn::Join intrinsic
+    # function, and the only argument we can reliably match against is the
+    # object {"Ref": "AWS::URLSuffix"}.
+    #
+    # See https://github.com/aws/aws-cdk/issues/17938
+
+    path = urlparse(sqs_queue.url).path
+    args = Match.array_with([Match.array_with([Match.string_like_regexp(f"{path}$")])])
+
+    template.has_resource_properties(
+        "AWS::Lambda::Function",
+        {
+            "Environment": {
+                "Variables": {
+                    "LPDAAC_QUEUE_URL": Match.object_like({"Fn::Join": args}),
+                    "TILER_QUEUE_URL": Match.object_like({"Fn::Join": args}),
+                }
+            }
+        },
+    )
diff --git a/tests/unit/test_historical_handler.py b/tests/unit/test_historical_handler.py
index f0e20ab..750674c 100644
--- a/tests/unit/test_historical_handler.py
+++ b/tests/unit/test_historical_handler.py
@@ -18,7 +18,9 @@ def test_lpdaac_historical_handler(
     from hls_lpdaac.historical.index import _handler
 
     _handler(s3_event, sqs_queue.url)
-    messages = sqs_queue.receive_messages()
+    # We expect only 1 message, but we set MaxNumberOfMessages > 1 to allow us
+    # to fail the test if there are multiple messages.
+    messages = sqs_queue.receive_messages(MaxNumberOfMessages=10)
     expected_message = s3_object.get()["Body"].read().decode("utf-8")
 
     assert len(messages) == 1
diff --git a/tests/unit/test_hls_lpdaac_stack.py b/tests/unit/test_historical_stack.py
similarity index 90%
rename from tests/unit/test_hls_lpdaac_stack.py
rename to tests/unit/test_historical_stack.py
index 4e7df02..5ef8f40 100644
--- a/tests/unit/test_hls_lpdaac_stack.py
+++ b/tests/unit/test_historical_stack.py
@@ -1,10 +1,10 @@
 from typing import TYPE_CHECKING
 from urllib.parse import urlparse
 
-from aws_cdk import core as cdk
+from aws_cdk import App
 from aws_cdk.assertions import Match, Template
 
-from cdk.stacks import HlsLpdaacStack
+from cdk.stacks import HistoricalNotificationStack
 
 if TYPE_CHECKING:
     from mypy_boto3_s3.service_resource import Bucket
@@ -12,8 +12,8 @@
 
 def test_lambda_environment(s3_bucket: "Bucket", sqs_queue: "Queue"):
-    app = cdk.App()
-    stack = HlsLpdaacStack(
+    app = App()
+    stack = HistoricalNotificationStack(
         app,
         "hls-lpdaac",
         bucket_name=s3_bucket.name,
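Note: for intuition about the matching gymnastics in `test_forward_stack.py` above, the unresolved queue URL in the synthesized template is roughly the object below (an illustrative shape with placeholder region, account, and queue name, not exact synth output), which is why the test can only regex-match the path portion of the `Fn::Join` arguments:

unresolved_queue_url = {
    "Fn::Join": [
        "",
        [
            "https://sqs.us-west-2.",
            {"Ref": "AWS::URLSuffix"},
            "/123456789012/my-queue",
        ],
    ]
}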
diff --git a/tox.ini b/tox.ini
index 4edfa53..e4a6f94 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,14 +1,24 @@
 [tox]
+# `allowlist_externals` introduced in tox 3.18.0 (deprecating `whitelist_externals`)
+minversion = 3.18.0
 envlist = py39
 
 [testenv]
+basepython = python3.9
+# Setting usedevelop to True is necessary to avoid a tox bug that otherwise causes
+# the pytest coverage plugin to fail to collect coverage data, issuing the
+# message 'CoverageWarning: No data was collected. (no-data-collected)'
+usedevelop = True
 extras =
     test
     dev
-envdir = .venv-tox
+envdir = .venv
 passenv = AWS_DEFAULT_REGION
+allowlist_externals =
+    make
 commands =
-    python -m pytest -vv --cov=src tests/unit
+    make install-node
+    python -m pytest -vv --cov src --cov-report term-missing tests/unit
     python -m pyright src tests
     flake8
@@ -16,14 +26,14 @@ commands =
 extras =
     test
     dev
-envdir = .venv-tox
+envdir = .venv
 passenv = AWS_*
 commands =
-    python -m pytest -vv tests/integration
+    python -m pytest -vv {posargs}
 
 [testenv:dev]
 extras = {[cdk]extras}
-envdir = .venv-dev
+envdir = .venv
 passenv = {[cdk]passenv}
 allowlist_externals = {[cdk]allowlist_externals}
 commands =
@@ -36,6 +46,7 @@
 passenv =
     AWS_*
     HLS_LPDAAC_*
 allowlist_externals =
+    cdk
     make
 commands =
     make install-cdk
@@ -47,7 +58,7 @@ exclude =
     __pycache__
     .git
     .tox
-    .venv*
+    .*venv*
     cdk.out
     *.egg-info
 max-line-length = 90
@@ -60,6 +71,7 @@ line-length = 90
 
 [isort]
 profile = black
 src_paths =
+    *.py
     cdk
     src
     tests